// SPDX-License-Identifier: GPL-2.0-only
//
// Driver for Cadence QSPI Controller
//
// Copyright Altera Corporation (C) 2012-2014. All rights reserved.
// Copyright Intel Corporation (C) 2019-2020. All rights reserved.
// Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/firmware/xlnx-zynqmp.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/sched.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/timer.h>

#define CQSPI_NAME "cadence-qspi"
#define CQSPI_MAX_CHIPSELECT 4

static_assert(CQSPI_MAX_CHIPSELECT <= SPI_DEVICE_CS_CNT_MAX);

/* Quirks */
#define CQSPI_NEEDS_WR_DELAY BIT(0)
#define CQSPI_DISABLE_DAC_MODE BIT(1)
#define CQSPI_SUPPORT_EXTERNAL_DMA BIT(2)
#define CQSPI_NO_SUPPORT_WR_COMPLETION BIT(3)
#define CQSPI_SLOW_SRAM BIT(4)
#define CQSPI_NEEDS_APB_AHB_HAZARD_WAR BIT(5)
#define CQSPI_RD_NO_IRQ BIT(6)
#define CQSPI_DMA_SET_MASK BIT(7)
#define CQSPI_SUPPORT_DEVICE_RESET BIT(8)
#define CQSPI_DISABLE_STIG_MODE BIT(9)
#define CQSPI_DISABLE_RUNTIME_PM BIT(10)
#define CQSPI_NO_INDIRECT_MODE BIT(11)
#define CQSPI_HAS_WR_PROTECT BIT(12)

/* Capabilities */
#define CQSPI_SUPPORTS_OCTAL BIT(0)
#define CQSPI_SUPPORTS_QUAD BIT(1)

#define CQSPI_OP_WIDTH(part) ((part).nbytes ? ilog2((part).buswidth) : 0)
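/*
 * Example: a 1-bit wide phase encodes as 0, a 2-bit phase as 1, a 4-bit
 * phase as 2 and an 8-bit phase as 3, matching the 2-bit TYPE fields in
 * the RD/WR instruction registers. A phase with no bytes encodes as 0.
 */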

enum {
	CLK_QSPI_REF = 0,
	CLK_QSPI_APB,
	CLK_QSPI_AHB,
	CLK_QSPI_NUM,
};

struct cqspi_st;

struct cqspi_flash_pdata {
	struct cqspi_st *cqspi;
	u32 clk_rate;
	u32 read_delay;
	u32 tshsl_ns;
	u32 tsd2d_ns;
	u32 tchsh_ns;
	u32 tslch_ns;
	u8 cs;
};

static const struct clk_bulk_data cqspi_clks[CLK_QSPI_NUM] = {
	[CLK_QSPI_APB] = { .id = "apb" },
	[CLK_QSPI_AHB] = { .id = "ahb" },
};

struct cqspi_st {
	struct platform_device *pdev;
	struct spi_controller *host;
	struct clk_bulk_data clks[CLK_QSPI_NUM];
	unsigned int sclk;

	void __iomem *iobase;
	void __iomem *ahb_base;
	resource_size_t ahb_size;
	struct completion transfer_complete;

	struct dma_chan *rx_chan;
	struct completion rx_dma_complete;
	dma_addr_t mmap_phys_base;

	int current_cs;
	unsigned long master_ref_clk_hz;
	bool is_decoded_cs;
	u32 fifo_depth;
	u32 fifo_width;
	u32 num_chipselect;
	bool rclk_en;
	u32 trigger_address;
	u32 wr_delay;
	bool use_direct_mode;
	bool use_direct_mode_wr;
	struct cqspi_flash_pdata f_pdata[CQSPI_MAX_CHIPSELECT];
	bool use_dma_read;
	u32 pd_dev_id;
	bool wr_completion;
	bool slow_sram;
	bool apb_ahb_hazard;

	bool is_jh7110; /* Flag for StarFive JH7110 SoC */
	bool is_rzn1; /* Flag for Renesas RZ/N1 SoC */
	bool disable_stig_mode;
	refcount_t refcount;
	refcount_t inflight_ops;

	const struct cqspi_driver_platdata *ddata;
};

struct cqspi_driver_platdata {
	u32 hwcaps_mask;
	u16 quirks;
	int (*indirect_read_dma)(struct cqspi_flash_pdata *f_pdata,
				 u_char *rxbuf, loff_t from_addr, size_t n_rx);
	u32 (*get_dma_status)(struct cqspi_st *cqspi);
};

/* Operation timeout value */
#define CQSPI_TIMEOUT_MS 500
#define CQSPI_READ_TIMEOUT_MS 10
#define CQSPI_BUSYWAIT_TIMEOUT_US 500

/* Runtime_pm autosuspend delay */
#define CQSPI_AUTOSUSPEND_TIMEOUT 2000

#define CQSPI_DUMMY_CLKS_PER_BYTE 8
#define CQSPI_DUMMY_BYTES_MAX 4
#define CQSPI_DUMMY_CLKS_MAX 31

#define CQSPI_STIG_DATA_LEN_MAX 8

/* Register map */
#define CQSPI_REG_CONFIG 0x00
#define CQSPI_REG_CONFIG_ENABLE_MASK BIT(0)
#define CQSPI_REG_CONFIG_ENB_DIR_ACC_CTRL BIT(7)
#define CQSPI_REG_CONFIG_DECODE_MASK BIT(9)
#define CQSPI_REG_CONFIG_CHIPSELECT_LSB 10
#define CQSPI_REG_CONFIG_DMA_MASK BIT(15)
#define CQSPI_REG_CONFIG_BAUD_LSB 19
#define CQSPI_REG_CONFIG_DTR_PROTO BIT(24)
#define CQSPI_REG_CONFIG_DUAL_OPCODE BIT(30)
#define CQSPI_REG_CONFIG_IDLE_LSB 31
#define CQSPI_REG_CONFIG_CHIPSELECT_MASK 0xF
#define CQSPI_REG_CONFIG_BAUD_MASK 0xF
#define CQSPI_REG_CONFIG_RESET_PIN_FLD_MASK BIT(5)
#define CQSPI_REG_CONFIG_RESET_CFG_FLD_MASK BIT(6)

#define CQSPI_REG_RD_INSTR 0x04
#define CQSPI_REG_RD_INSTR_OPCODE_LSB 0
#define CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB 8
#define CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB 12
#define CQSPI_REG_RD_INSTR_TYPE_DATA_LSB 16
#define CQSPI_REG_RD_INSTR_MODE_EN_LSB 20
#define CQSPI_REG_RD_INSTR_DUMMY_LSB 24
#define CQSPI_REG_RD_INSTR_TYPE_INSTR_MASK 0x3
#define CQSPI_REG_RD_INSTR_TYPE_ADDR_MASK 0x3
#define CQSPI_REG_RD_INSTR_TYPE_DATA_MASK 0x3
#define CQSPI_REG_RD_INSTR_DUMMY_MASK 0x1F

#define CQSPI_REG_WR_INSTR 0x08
#define CQSPI_REG_WR_INSTR_OPCODE_LSB 0
#define CQSPI_REG_WR_INSTR_TYPE_ADDR_LSB 12
#define CQSPI_REG_WR_INSTR_TYPE_DATA_LSB 16

#define CQSPI_REG_DELAY 0x0C
#define CQSPI_REG_DELAY_TSLCH_LSB 0
#define CQSPI_REG_DELAY_TCHSH_LSB 8
#define CQSPI_REG_DELAY_TSD2D_LSB 16
#define CQSPI_REG_DELAY_TSHSL_LSB 24
#define CQSPI_REG_DELAY_TSLCH_MASK 0xFF
#define CQSPI_REG_DELAY_TCHSH_MASK 0xFF
#define CQSPI_REG_DELAY_TSD2D_MASK 0xFF
#define CQSPI_REG_DELAY_TSHSL_MASK 0xFF

#define CQSPI_REG_READCAPTURE 0x10
#define CQSPI_REG_READCAPTURE_BYPASS_LSB 0
#define CQSPI_REG_READCAPTURE_DELAY_LSB 1
#define CQSPI_REG_READCAPTURE_DELAY_MASK 0xF

#define CQSPI_REG_SIZE 0x14
#define CQSPI_REG_SIZE_ADDRESS_LSB 0
#define CQSPI_REG_SIZE_PAGE_LSB 4
#define CQSPI_REG_SIZE_BLOCK_LSB 16
#define CQSPI_REG_SIZE_ADDRESS_MASK 0xF
#define CQSPI_REG_SIZE_PAGE_MASK 0xFFF
#define CQSPI_REG_SIZE_BLOCK_MASK 0x3F

#define CQSPI_REG_SRAMPARTITION 0x18
#define CQSPI_REG_INDIRECTTRIGGER 0x1C

#define CQSPI_REG_DMA 0x20
#define CQSPI_REG_DMA_SINGLE_LSB 0
#define CQSPI_REG_DMA_BURST_LSB 8
#define CQSPI_REG_DMA_SINGLE_MASK 0xFF
#define CQSPI_REG_DMA_BURST_MASK 0xFF

#define CQSPI_REG_REMAP 0x24
#define CQSPI_REG_MODE_BIT 0x28

#define CQSPI_REG_SDRAMLEVEL 0x2C
#define CQSPI_REG_SDRAMLEVEL_RD_LSB 0
#define CQSPI_REG_SDRAMLEVEL_WR_LSB 16
#define CQSPI_REG_SDRAMLEVEL_RD_MASK 0xFFFF
#define CQSPI_REG_SDRAMLEVEL_WR_MASK 0xFFFF

#define CQSPI_REG_WR_COMPLETION_CTRL 0x38
#define CQSPI_REG_WR_DISABLE_AUTO_POLL BIT(14)

#define CQSPI_REG_IRQSTATUS 0x40
#define CQSPI_REG_IRQMASK 0x44

#define CQSPI_REG_WR_PROT_CTRL 0x58

#define CQSPI_REG_INDIRECTRD 0x60
#define CQSPI_REG_INDIRECTRD_START_MASK BIT(0)
#define CQSPI_REG_INDIRECTRD_CANCEL_MASK BIT(1)
#define CQSPI_REG_INDIRECTRD_DONE_MASK BIT(5)

#define CQSPI_REG_INDIRECTRDWATERMARK 0x64
#define CQSPI_REG_INDIRECTRDSTARTADDR 0x68
#define CQSPI_REG_INDIRECTRDBYTES 0x6C

#define CQSPI_REG_CMDCTRL 0x90
#define CQSPI_REG_CMDCTRL_EXECUTE_MASK BIT(0)
#define CQSPI_REG_CMDCTRL_INPROGRESS_MASK BIT(1)
#define CQSPI_REG_CMDCTRL_DUMMY_LSB 7
#define CQSPI_REG_CMDCTRL_WR_BYTES_LSB 12
#define CQSPI_REG_CMDCTRL_WR_EN_LSB 15
#define CQSPI_REG_CMDCTRL_ADD_BYTES_LSB 16
#define CQSPI_REG_CMDCTRL_ADDR_EN_LSB 19
#define CQSPI_REG_CMDCTRL_RD_BYTES_LSB 20
#define CQSPI_REG_CMDCTRL_RD_EN_LSB 23
#define CQSPI_REG_CMDCTRL_OPCODE_LSB 24
#define CQSPI_REG_CMDCTRL_WR_BYTES_MASK 0x7
#define CQSPI_REG_CMDCTRL_ADD_BYTES_MASK 0x3
#define CQSPI_REG_CMDCTRL_RD_BYTES_MASK 0x7
#define CQSPI_REG_CMDCTRL_DUMMY_MASK 0x1F

#define CQSPI_REG_INDIRECTWR 0x70
#define CQSPI_REG_INDIRECTWR_START_MASK BIT(0)
#define CQSPI_REG_INDIRECTWR_CANCEL_MASK BIT(1)
#define CQSPI_REG_INDIRECTWR_DONE_MASK BIT(5)

#define CQSPI_REG_INDIRECTWRWATERMARK 0x74
#define CQSPI_REG_INDIRECTWRSTARTADDR 0x78
#define CQSPI_REG_INDIRECTWRBYTES 0x7C

#define CQSPI_REG_INDTRIG_ADDRRANGE 0x80

#define CQSPI_REG_CMDADDRESS 0x94
#define CQSPI_REG_CMDREADDATALOWER 0xA0
#define CQSPI_REG_CMDREADDATAUPPER 0xA4
#define CQSPI_REG_CMDWRITEDATALOWER 0xA8
#define CQSPI_REG_CMDWRITEDATAUPPER 0xAC

#define CQSPI_REG_POLLING_STATUS 0xB0
#define CQSPI_REG_POLLING_STATUS_DUMMY_LSB 16

#define CQSPI_REG_OP_EXT_LOWER 0xE0
#define CQSPI_REG_OP_EXT_READ_LSB 24
#define CQSPI_REG_OP_EXT_WRITE_LSB 16
#define CQSPI_REG_OP_EXT_STIG_LSB 0

#define CQSPI_REG_VERSAL_DMA_SRC_ADDR 0x1000

#define CQSPI_REG_VERSAL_DMA_DST_ADDR 0x1800
#define CQSPI_REG_VERSAL_DMA_DST_SIZE 0x1804

#define CQSPI_REG_VERSAL_DMA_DST_CTRL 0x180C

#define CQSPI_REG_VERSAL_DMA_DST_I_STS 0x1814
#define CQSPI_REG_VERSAL_DMA_DST_I_EN 0x1818
#define CQSPI_REG_VERSAL_DMA_DST_I_DIS 0x181C
#define CQSPI_REG_VERSAL_DMA_DST_DONE_MASK BIT(1)

#define CQSPI_REG_VERSAL_DMA_DST_ADDR_MSB 0x1828

#define CQSPI_REG_VERSAL_DMA_DST_CTRL_VAL 0xF43FFA00
#define CQSPI_REG_VERSAL_ADDRRANGE_WIDTH_VAL 0x6

/* Interrupt status bits */
#define CQSPI_REG_IRQ_MODE_ERR BIT(0)
#define CQSPI_REG_IRQ_UNDERFLOW BIT(1)
#define CQSPI_REG_IRQ_IND_COMP BIT(2)
#define CQSPI_REG_IRQ_IND_RD_REJECT BIT(3)
#define CQSPI_REG_IRQ_WR_PROTECTED_ERR BIT(4)
#define CQSPI_REG_IRQ_ILLEGAL_AHB_ERR BIT(5)
#define CQSPI_REG_IRQ_WATERMARK BIT(6)
#define CQSPI_REG_IRQ_IND_SRAM_FULL BIT(12)

#define CQSPI_IRQ_MASK_RD (CQSPI_REG_IRQ_WATERMARK | \
			   CQSPI_REG_IRQ_IND_SRAM_FULL | \
			   CQSPI_REG_IRQ_IND_COMP)

#define CQSPI_IRQ_MASK_RD_SLOW_SRAM (CQSPI_REG_IRQ_WATERMARK | \
				     CQSPI_REG_IRQ_IND_COMP)

#define CQSPI_IRQ_MASK_WR (CQSPI_REG_IRQ_IND_COMP | \
			   CQSPI_REG_IRQ_WATERMARK | \
			   CQSPI_REG_IRQ_UNDERFLOW)

#define CQSPI_IRQ_STATUS_MASK 0x1FFFF
#define CQSPI_DMA_UNALIGN 0x3

#define CQSPI_REG_VERSAL_DMA_VAL 0x602
static int cqspi_wait_for_bit(const struct cqspi_driver_platdata *ddata,
			      void __iomem *reg, const u32 mask, bool clr,
			      bool busywait)
{
	u64 timeout_us = CQSPI_TIMEOUT_MS * USEC_PER_MSEC;
	u32 val;

	if (busywait) {
		int ret = readl_relaxed_poll_timeout(reg, val,
						     (((clr ? ~val : val) & mask) == mask),
						     0, CQSPI_BUSYWAIT_TIMEOUT_US);

		if (ret != -ETIMEDOUT)
			return ret;

		timeout_us -= CQSPI_BUSYWAIT_TIMEOUT_US;
	}

	return readl_relaxed_poll_timeout(reg, val,
					  (((clr ? ~val : val) & mask) == mask),
					  10, timeout_us);
}
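
/*
 * Note on cqspi_wait_for_bit(): with busywait set, the bit is first
 * polled with no sleeping for up to CQSPI_BUSYWAIT_TIMEOUT_US (500 us);
 * only if that window expires does the slower 10 us-interval sleeping
 * poll run for the remainder of the CQSPI_TIMEOUT_MS budget.
 */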

static bool cqspi_is_idle(struct cqspi_st *cqspi)
{
	u32 reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);

	return reg & BIT(CQSPI_REG_CONFIG_IDLE_LSB);
}

static u32 cqspi_get_rd_sram_level(struct cqspi_st *cqspi)
{
	u32 reg = readl(cqspi->iobase + CQSPI_REG_SDRAMLEVEL);

	reg >>= CQSPI_REG_SDRAMLEVEL_RD_LSB;
	return reg & CQSPI_REG_SDRAMLEVEL_RD_MASK;
}

static u32 cqspi_get_versal_dma_status(struct cqspi_st *cqspi)
{
	u32 dma_status;

	dma_status = readl(cqspi->iobase +
			   CQSPI_REG_VERSAL_DMA_DST_I_STS);
	writel(dma_status, cqspi->iobase +
	       CQSPI_REG_VERSAL_DMA_DST_I_STS);

	return dma_status & CQSPI_REG_VERSAL_DMA_DST_DONE_MASK;
}

static irqreturn_t cqspi_irq_handler(int this_irq, void *dev)
{
	struct cqspi_st *cqspi = dev;
	const struct cqspi_driver_platdata *ddata = cqspi->ddata;
	unsigned int irq_status;

	/* Read interrupt status */
	irq_status = readl(cqspi->iobase + CQSPI_REG_IRQSTATUS);

	/* Clear interrupt */
	writel(irq_status, cqspi->iobase + CQSPI_REG_IRQSTATUS);

	if (cqspi->use_dma_read && ddata && ddata->get_dma_status)
		irq_status = ddata->get_dma_status(cqspi);
	else if (cqspi->slow_sram)
		irq_status &= CQSPI_IRQ_MASK_RD_SLOW_SRAM | CQSPI_IRQ_MASK_WR;
	else
		irq_status &= CQSPI_IRQ_MASK_RD | CQSPI_IRQ_MASK_WR;

	if (irq_status)
		complete(&cqspi->transfer_complete);

	return IRQ_HANDLED;
}

static unsigned int cqspi_calc_rdreg(const struct spi_mem_op *op)
{
	u32 rdreg = 0;

	rdreg |= CQSPI_OP_WIDTH(op->cmd) << CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB;
	rdreg |= CQSPI_OP_WIDTH(op->addr) << CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB;
	rdreg |= CQSPI_OP_WIDTH(op->data) << CQSPI_REG_RD_INSTR_TYPE_DATA_LSB;

	return rdreg;
}
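
/*
 * Example: a 1-4-4 op (1-bit opcode, 4-bit address, 4-bit data) yields
 * rdreg = (0 << 8) | (2 << 12) | (2 << 16) = 0x22000.
 */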

static unsigned int cqspi_calc_dummy(const struct spi_mem_op *op)
{
	unsigned int dummy_clk;

	if (!op->dummy.nbytes)
		return 0;

	dummy_clk = op->dummy.nbytes * (8 / op->dummy.buswidth);
	if (op->cmd.dtr)
		dummy_clk /= 2;

	return dummy_clk;
}
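
/*
 * Example: 3 dummy bytes on a single line take 3 * 8 = 24 clock cycles
 * in SDR mode, halved to 12 in DTR mode since data is transferred on
 * both clock edges.
 */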

static int cqspi_wait_idle(struct cqspi_st *cqspi)
{
	const unsigned int poll_idle_retry = 3;
	unsigned int count = 0;
	unsigned long timeout;

	timeout = jiffies + msecs_to_jiffies(CQSPI_TIMEOUT_MS);
	while (1) {
		/*
		 * Read a few times in succession to ensure the controller
		 * is indeed idle, that is, the bit does not transition
		 * low again.
		 */
		if (cqspi_is_idle(cqspi))
			count++;
		else
			count = 0;

		if (count >= poll_idle_retry)
			return 0;

		if (time_after(jiffies, timeout)) {
			/* Timeout, in busy mode. */
			dev_err(&cqspi->pdev->dev,
				"QSPI is still busy after %dms timeout.\n",
				CQSPI_TIMEOUT_MS);
			return -ETIMEDOUT;
		}

		cpu_relax();
	}
}

static int cqspi_exec_flash_cmd(struct cqspi_st *cqspi, unsigned int reg)
{
	void __iomem *reg_base = cqspi->iobase;
	int ret;

	/* Write the CMDCTRL register without starting execution. */
	writel(reg, reg_base + CQSPI_REG_CMDCTRL);
	/* Start execution. */
	reg |= CQSPI_REG_CMDCTRL_EXECUTE_MASK;
	writel(reg, reg_base + CQSPI_REG_CMDCTRL);

	/* Polling for completion. */
	ret = cqspi_wait_for_bit(cqspi->ddata, reg_base + CQSPI_REG_CMDCTRL,
				 CQSPI_REG_CMDCTRL_INPROGRESS_MASK, 1, true);
	if (ret) {
		dev_err(&cqspi->pdev->dev,
			"Flash command execution timed out.\n");
		return ret;
	}

	/* Polling QSPI idle status. */
	return cqspi_wait_idle(cqspi);
}

static int cqspi_setup_opcode_ext(struct cqspi_flash_pdata *f_pdata,
				  const struct spi_mem_op *op,
				  unsigned int shift)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	unsigned int reg;
	u8 ext;

	if (op->cmd.nbytes != 2)
		return -EINVAL;

	/* Opcode extension is the LSB. */
	ext = op->cmd.opcode & 0xff;

	reg = readl(reg_base + CQSPI_REG_OP_EXT_LOWER);
	reg &= ~(0xff << shift);
	reg |= ext << shift;
	writel(reg, reg_base + CQSPI_REG_OP_EXT_LOWER);

	return 0;
}

static int cqspi_enable_dtr(struct cqspi_flash_pdata *f_pdata,
			    const struct spi_mem_op *op, unsigned int shift)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	unsigned int reg;
	int ret;

	reg = readl(reg_base + CQSPI_REG_CONFIG);

	/*
	 * We enable dual byte opcode here. The callers have to set up the
	 * extension opcode based on which type of operation it is.
	 */
	if (op->cmd.dtr) {
		reg |= CQSPI_REG_CONFIG_DTR_PROTO;
		reg |= CQSPI_REG_CONFIG_DUAL_OPCODE;

		/* Set up command opcode extension. */
		ret = cqspi_setup_opcode_ext(f_pdata, op, shift);
		if (ret)
			return ret;
	} else {
		unsigned int mask = CQSPI_REG_CONFIG_DTR_PROTO | CQSPI_REG_CONFIG_DUAL_OPCODE;
		/* Shortcut if DTR is already disabled. */
		if ((reg & mask) == 0)
			return 0;
		reg &= ~mask;
	}

	writel(reg, reg_base + CQSPI_REG_CONFIG);

	return cqspi_wait_idle(cqspi);
}

static int cqspi_command_read(struct cqspi_flash_pdata *f_pdata,
			      const struct spi_mem_op *op)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	u8 *rxbuf = op->data.buf.in;
	u8 opcode;
	size_t n_rx = op->data.nbytes;
	unsigned int rdreg;
	unsigned int reg;
	unsigned int dummy_clk;
	size_t read_len;
	int status;

	status = cqspi_enable_dtr(f_pdata, op, CQSPI_REG_OP_EXT_STIG_LSB);
	if (status)
		return status;

	if (!n_rx || n_rx > CQSPI_STIG_DATA_LEN_MAX || !rxbuf) {
		dev_err(&cqspi->pdev->dev,
			"Invalid input argument, len %zu rxbuf 0x%p\n",
			n_rx, rxbuf);
		return -EINVAL;
	}

	if (op->cmd.dtr)
		opcode = op->cmd.opcode >> 8;
	else
		opcode = op->cmd.opcode;

	reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;

	rdreg = cqspi_calc_rdreg(op);
	writel(rdreg, reg_base + CQSPI_REG_RD_INSTR);

	dummy_clk = cqspi_calc_dummy(op);
	if (dummy_clk > CQSPI_DUMMY_CLKS_MAX)
		return -EOPNOTSUPP;

	if (dummy_clk)
		reg |= (dummy_clk & CQSPI_REG_CMDCTRL_DUMMY_MASK)
		       << CQSPI_REG_CMDCTRL_DUMMY_LSB;

	reg |= BIT(CQSPI_REG_CMDCTRL_RD_EN_LSB);

	/* 0 means 1 byte. */
	reg |= (((n_rx - 1) & CQSPI_REG_CMDCTRL_RD_BYTES_MASK)
		<< CQSPI_REG_CMDCTRL_RD_BYTES_LSB);

	/* setup ADDR BIT field */
	if (op->addr.nbytes) {
		reg |= BIT(CQSPI_REG_CMDCTRL_ADDR_EN_LSB);
		reg |= ((op->addr.nbytes - 1) &
			CQSPI_REG_CMDCTRL_ADD_BYTES_MASK)
			<< CQSPI_REG_CMDCTRL_ADD_BYTES_LSB;

		writel(op->addr.val, reg_base + CQSPI_REG_CMDADDRESS);
	}

	status = cqspi_exec_flash_cmd(cqspi, reg);
	if (status)
		return status;

	reg = readl(reg_base + CQSPI_REG_CMDREADDATALOWER);

	/* Put the read value into rx_buf */
	read_len = (n_rx > 4) ? 4 : n_rx;
	memcpy(rxbuf, &reg, read_len);
	rxbuf += read_len;

	if (n_rx > 4) {
		reg = readl(reg_base + CQSPI_REG_CMDREADDATAUPPER);

		read_len = n_rx - read_len;
		memcpy(rxbuf, &reg, read_len);
	}

	/* Reset CMD_CTRL Reg once command read completes */
	writel(0, reg_base + CQSPI_REG_CMDCTRL);

	return 0;
}

static int cqspi_command_write(struct cqspi_flash_pdata *f_pdata,
			       const struct spi_mem_op *op)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	u8 opcode;
	const u8 *txbuf = op->data.buf.out;
	size_t n_tx = op->data.nbytes;
	unsigned int reg;
	unsigned int data;
	size_t write_len;
	int ret;

	ret = cqspi_enable_dtr(f_pdata, op, CQSPI_REG_OP_EXT_STIG_LSB);
	if (ret)
		return ret;

	if (n_tx > CQSPI_STIG_DATA_LEN_MAX || (n_tx && !txbuf)) {
		dev_err(&cqspi->pdev->dev,
			"Invalid input argument, cmdlen %zu txbuf 0x%p\n",
			n_tx, txbuf);
		return -EINVAL;
	}

	reg = cqspi_calc_rdreg(op);
	writel(reg, reg_base + CQSPI_REG_RD_INSTR);

	if (op->cmd.dtr)
		opcode = op->cmd.opcode >> 8;
	else
		opcode = op->cmd.opcode;

	reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;

	if (op->addr.nbytes) {
		reg |= BIT(CQSPI_REG_CMDCTRL_ADDR_EN_LSB);
		reg |= ((op->addr.nbytes - 1) &
			CQSPI_REG_CMDCTRL_ADD_BYTES_MASK)
			<< CQSPI_REG_CMDCTRL_ADD_BYTES_LSB;

		writel(op->addr.val, reg_base + CQSPI_REG_CMDADDRESS);
	}

	if (n_tx) {
		reg |= BIT(CQSPI_REG_CMDCTRL_WR_EN_LSB);
		reg |= ((n_tx - 1) & CQSPI_REG_CMDCTRL_WR_BYTES_MASK)
			<< CQSPI_REG_CMDCTRL_WR_BYTES_LSB;
		data = 0;
		write_len = (n_tx > 4) ? 4 : n_tx;
		memcpy(&data, txbuf, write_len);
		txbuf += write_len;
		writel(data, reg_base + CQSPI_REG_CMDWRITEDATALOWER);

		if (n_tx > 4) {
			data = 0;
			write_len = n_tx - 4;
			memcpy(&data, txbuf, write_len);
			writel(data, reg_base + CQSPI_REG_CMDWRITEDATAUPPER);
		}
	}

	ret = cqspi_exec_flash_cmd(cqspi, reg);

	/* Reset CMD_CTRL Reg once command write completes */
	writel(0, reg_base + CQSPI_REG_CMDCTRL);

	return ret;
}

static int cqspi_read_setup(struct cqspi_flash_pdata *f_pdata,
			    const struct spi_mem_op *op)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	unsigned int dummy_clk = 0;
	unsigned int reg;
	int ret;
	u8 opcode;

	ret = cqspi_enable_dtr(f_pdata, op, CQSPI_REG_OP_EXT_READ_LSB);
	if (ret)
		return ret;

	if (op->cmd.dtr)
		opcode = op->cmd.opcode >> 8;
	else
		opcode = op->cmd.opcode;

	reg = opcode << CQSPI_REG_RD_INSTR_OPCODE_LSB;
	reg |= cqspi_calc_rdreg(op);

	/* Setup dummy clock cycles */
	dummy_clk = cqspi_calc_dummy(op);

	if (dummy_clk > CQSPI_DUMMY_CLKS_MAX)
		return -EOPNOTSUPP;

	if (dummy_clk)
		reg |= (dummy_clk & CQSPI_REG_RD_INSTR_DUMMY_MASK)
		       << CQSPI_REG_RD_INSTR_DUMMY_LSB;

	writel(reg, reg_base + CQSPI_REG_RD_INSTR);

	/* Set address width */
	reg = readl(reg_base + CQSPI_REG_SIZE);
	reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
	reg |= (op->addr.nbytes - 1);
	writel(reg, reg_base + CQSPI_REG_SIZE);
	readl(reg_base + CQSPI_REG_SIZE); /* Flush posted write. */
	return 0;
}

static int cqspi_indirect_read_execute(struct cqspi_flash_pdata *f_pdata,
				       u8 *rxbuf, loff_t from_addr,
				       const size_t n_rx)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	bool use_irq = !(cqspi->ddata && cqspi->ddata->quirks & CQSPI_RD_NO_IRQ);
	struct device *dev = &cqspi->pdev->dev;
	void __iomem *reg_base = cqspi->iobase;
	void __iomem *ahb_base = cqspi->ahb_base;
	unsigned int remaining = n_rx;
	unsigned int mod_bytes = n_rx % 4;
	unsigned int bytes_to_read = 0;
	u8 *rxbuf_end = rxbuf + n_rx;
	int ret = 0;

	if (!refcount_read(&cqspi->refcount))
		return -ENODEV;

	writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR);
	writel(remaining, reg_base + CQSPI_REG_INDIRECTRDBYTES);

	/* Clear all interrupts. */
	writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);

	/*
	 * On the SoCFPGA platform, reading the SRAM is slow due to a
	 * hardware limitation and causes a read interrupt storm on the
	 * CPU, so enable only the watermark interrupt here and disable
	 * all read interrupts later, as we want to run the "bytes to
	 * read" loop with all read interrupts disabled for maximum
	 * performance.
	 */

	if (use_irq && cqspi->slow_sram)
		writel(CQSPI_IRQ_MASK_RD_SLOW_SRAM, reg_base + CQSPI_REG_IRQMASK);
	else if (use_irq)
		writel(CQSPI_IRQ_MASK_RD, reg_base + CQSPI_REG_IRQMASK);
	else
		writel(0, reg_base + CQSPI_REG_IRQMASK);

	reinit_completion(&cqspi->transfer_complete);
	writel(CQSPI_REG_INDIRECTRD_START_MASK,
	       reg_base + CQSPI_REG_INDIRECTRD);
	readl(reg_base + CQSPI_REG_INDIRECTRD); /* Flush posted write. */

	while (remaining > 0) {
		ret = 0;
		if (use_irq &&
		    !wait_for_completion_timeout(&cqspi->transfer_complete,
						 msecs_to_jiffies(CQSPI_READ_TIMEOUT_MS)))
			ret = -ETIMEDOUT;

		/*
		 * Prevent lost interrupt and race condition by reinitializing early.
		 * A spurious wakeup and another wait cycle can occur here,
		 * which is preferable to waiting until timeout if interrupt is lost.
		 */
		if (use_irq)
			reinit_completion(&cqspi->transfer_complete);

		bytes_to_read = cqspi_get_rd_sram_level(cqspi);

		if (ret && bytes_to_read == 0) {
			dev_err(dev, "Indirect read timeout, no bytes\n");
			goto failrd;
		}

		while (bytes_to_read != 0) {
			unsigned int word_remain = round_down(remaining, 4);

			bytes_to_read *= cqspi->fifo_width;
			bytes_to_read = bytes_to_read > remaining ?
					remaining : bytes_to_read;
			bytes_to_read = round_down(bytes_to_read, 4);
			/* Read 4 byte word chunks then single bytes */
			if (bytes_to_read) {
				ioread32_rep(ahb_base, rxbuf,
					     (bytes_to_read / 4));
			} else if (!word_remain && mod_bytes) {
				unsigned int temp = ioread32(ahb_base);

				bytes_to_read = mod_bytes;
				memcpy(rxbuf, &temp, min((unsigned int)
							 (rxbuf_end - rxbuf),
							 bytes_to_read));
			}
			rxbuf += bytes_to_read;
			remaining -= bytes_to_read;
			bytes_to_read = cqspi_get_rd_sram_level(cqspi);
		}
	}

	/* Check indirect done status */
	ret = cqspi_wait_for_bit(cqspi->ddata, reg_base + CQSPI_REG_INDIRECTRD,
				 CQSPI_REG_INDIRECTRD_DONE_MASK, 0, true);
	if (ret) {
		dev_err(dev, "Indirect read completion error (%i)\n", ret);
		goto failrd;
	}

	/* Disable interrupt */
	writel(0, reg_base + CQSPI_REG_IRQMASK);

	/* Clear indirect completion status */
	writel(CQSPI_REG_INDIRECTRD_DONE_MASK, reg_base + CQSPI_REG_INDIRECTRD);

	return 0;

failrd:
	/* Disable interrupt */
	writel(0, reg_base + CQSPI_REG_IRQMASK);

	/* Cancel the indirect read */
	writel(CQSPI_REG_INDIRECTRD_CANCEL_MASK,
	       reg_base + CQSPI_REG_INDIRECTRD);
	return ret;
}

static void cqspi_device_reset(struct cqspi_st *cqspi)
{
	u32 reg;

	reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
	reg |= CQSPI_REG_CONFIG_RESET_CFG_FLD_MASK;
	writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);
	/*
	 * NOTE: Delay timing implementation is derived from
	 * spi_nor_hw_reset()
	 */
	writel(reg & ~CQSPI_REG_CONFIG_RESET_PIN_FLD_MASK, cqspi->iobase + CQSPI_REG_CONFIG);
	usleep_range(1, 5);
	writel(reg | CQSPI_REG_CONFIG_RESET_PIN_FLD_MASK, cqspi->iobase + CQSPI_REG_CONFIG);
	usleep_range(100, 150);
	writel(reg & ~CQSPI_REG_CONFIG_RESET_PIN_FLD_MASK, cqspi->iobase + CQSPI_REG_CONFIG);
	usleep_range(1000, 1200);
}

static void cqspi_controller_enable(struct cqspi_st *cqspi, bool enable)
{
	void __iomem *reg_base = cqspi->iobase;
	unsigned int reg;

	reg = readl(reg_base + CQSPI_REG_CONFIG);

	if (enable)
		reg |= CQSPI_REG_CONFIG_ENABLE_MASK;
	else
		reg &= ~CQSPI_REG_CONFIG_ENABLE_MASK;

	writel(reg, reg_base + CQSPI_REG_CONFIG);
}

static int cqspi_versal_indirect_read_dma(struct cqspi_flash_pdata *f_pdata,
					  u_char *rxbuf, loff_t from_addr,
					  size_t n_rx)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	struct device *dev = &cqspi->pdev->dev;
	void __iomem *reg_base = cqspi->iobase;
	u32 reg, bytes_to_dma;
	loff_t addr = from_addr;
	void *buf = rxbuf;
	dma_addr_t dma_addr;
	u8 bytes_rem;
	int ret = 0;

	bytes_rem = n_rx % 4;
	bytes_to_dma = (n_rx - bytes_rem);

	if (!bytes_to_dma)
		goto nondmard;

	ret = zynqmp_pm_ospi_mux_select(cqspi->pd_dev_id, PM_OSPI_MUX_SEL_DMA);
	if (ret)
		return ret;

	cqspi_controller_enable(cqspi, 0);

	reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
	reg |= CQSPI_REG_CONFIG_DMA_MASK;
	writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);

	cqspi_controller_enable(cqspi, 1);

	dma_addr = dma_map_single(dev, rxbuf, bytes_to_dma, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		dev_err(dev, "dma mapping failed\n");
		return -ENOMEM;
	}

	writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR);
	writel(bytes_to_dma, reg_base + CQSPI_REG_INDIRECTRDBYTES);
	writel(CQSPI_REG_VERSAL_ADDRRANGE_WIDTH_VAL,
	       reg_base + CQSPI_REG_INDTRIG_ADDRRANGE);

	/* Clear all interrupts. */
	writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);

	/* Enable DMA done interrupt */
	writel(CQSPI_REG_VERSAL_DMA_DST_DONE_MASK,
	       reg_base + CQSPI_REG_VERSAL_DMA_DST_I_EN);

	/* Default DMA periph configuration */
	writel(CQSPI_REG_VERSAL_DMA_VAL, reg_base + CQSPI_REG_DMA);

	/* Configure DMA Dst address */
	writel(lower_32_bits(dma_addr),
	       reg_base + CQSPI_REG_VERSAL_DMA_DST_ADDR);
	writel(upper_32_bits(dma_addr),
	       reg_base + CQSPI_REG_VERSAL_DMA_DST_ADDR_MSB);

	/* Configure DMA Src address */
	writel(cqspi->trigger_address, reg_base +
	       CQSPI_REG_VERSAL_DMA_SRC_ADDR);

	/* Set DMA destination size */
	writel(bytes_to_dma, reg_base + CQSPI_REG_VERSAL_DMA_DST_SIZE);

	/* Set DMA destination control */
	writel(CQSPI_REG_VERSAL_DMA_DST_CTRL_VAL,
	       reg_base + CQSPI_REG_VERSAL_DMA_DST_CTRL);

	writel(CQSPI_REG_INDIRECTRD_START_MASK,
	       reg_base + CQSPI_REG_INDIRECTRD);

	reinit_completion(&cqspi->transfer_complete);

	if (!wait_for_completion_timeout(&cqspi->transfer_complete,
					 msecs_to_jiffies(max_t(size_t, bytes_to_dma, 500)))) {
		ret = -ETIMEDOUT;
		goto failrd;
	}

	/* Disable DMA interrupt */
	writel(0x0, cqspi->iobase + CQSPI_REG_VERSAL_DMA_DST_I_DIS);

	/* Clear indirect completion status */
	writel(CQSPI_REG_INDIRECTRD_DONE_MASK,
	       cqspi->iobase + CQSPI_REG_INDIRECTRD);
	dma_unmap_single(dev, dma_addr, bytes_to_dma, DMA_FROM_DEVICE);

	cqspi_controller_enable(cqspi, 0);

	reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
	reg &= ~CQSPI_REG_CONFIG_DMA_MASK;
	writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);

	cqspi_controller_enable(cqspi, 1);

	ret = zynqmp_pm_ospi_mux_select(cqspi->pd_dev_id,
					PM_OSPI_MUX_SEL_LINEAR);
	if (ret)
		return ret;

nondmard:
	if (bytes_rem) {
		addr += bytes_to_dma;
		buf += bytes_to_dma;
		ret = cqspi_indirect_read_execute(f_pdata, buf, addr,
						  bytes_rem);
		if (ret)
			return ret;
	}

	return 0;

failrd:
	/* Disable DMA interrupt */
	writel(0x0, reg_base + CQSPI_REG_VERSAL_DMA_DST_I_DIS);

	/* Cancel the indirect read */
	writel(CQSPI_REG_INDIRECTRD_CANCEL_MASK,
	       reg_base + CQSPI_REG_INDIRECTRD);

	dma_unmap_single(dev, dma_addr, bytes_to_dma, DMA_FROM_DEVICE);

	reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
	reg &= ~CQSPI_REG_CONFIG_DMA_MASK;
	writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);

	zynqmp_pm_ospi_mux_select(cqspi->pd_dev_id, PM_OSPI_MUX_SEL_LINEAR);

	return ret;
}

static int cqspi_write_setup(struct cqspi_flash_pdata *f_pdata,
			     const struct spi_mem_op *op)
{
	unsigned int reg;
	int ret;
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	u8 opcode;

	ret = cqspi_enable_dtr(f_pdata, op, CQSPI_REG_OP_EXT_WRITE_LSB);
	if (ret)
		return ret;

	if (op->cmd.dtr)
		opcode = op->cmd.opcode >> 8;
	else
		opcode = op->cmd.opcode;

	/* Set opcode. */
	reg = opcode << CQSPI_REG_WR_INSTR_OPCODE_LSB;
	reg |= CQSPI_OP_WIDTH(op->data) << CQSPI_REG_WR_INSTR_TYPE_DATA_LSB;
	reg |= CQSPI_OP_WIDTH(op->addr) << CQSPI_REG_WR_INSTR_TYPE_ADDR_LSB;
	writel(reg, reg_base + CQSPI_REG_WR_INSTR);
	reg = cqspi_calc_rdreg(op);
	writel(reg, reg_base + CQSPI_REG_RD_INSTR);

	/*
	 * SPI NAND flashes require the address of the status register to be
	 * passed in the Read SR command. Also, some SPI NOR flashes like the
	 * Cypress Semper flash expect a 4-byte dummy address in the Read SR
	 * command in DTR mode.
	 *
	 * But this controller does not support address phase in the Read SR
	 * command when doing auto-HW polling. So, disable write completion
	 * polling on the controller's side. spinand and spi-nor will take
	 * care of polling the status register.
	 */
	if (cqspi->wr_completion) {
		reg = readl(reg_base + CQSPI_REG_WR_COMPLETION_CTRL);
		reg |= CQSPI_REG_WR_DISABLE_AUTO_POLL;
		writel(reg, reg_base + CQSPI_REG_WR_COMPLETION_CTRL);
		/*
		 * DAC mode requires auto polling, as the flash needs to be
		 * polled for write completion in case of a bubble in the
		 * SPI transaction due to a slow CPU/DMA master.
		 */
		cqspi->use_direct_mode_wr = false;
	}

	reg = readl(reg_base + CQSPI_REG_SIZE);
	reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
	reg |= (op->addr.nbytes - 1);
	writel(reg, reg_base + CQSPI_REG_SIZE);
	readl(reg_base + CQSPI_REG_SIZE); /* Flush posted write. */
	return 0;
}

static int cqspi_indirect_write_execute(struct cqspi_flash_pdata *f_pdata,
					loff_t to_addr, const u8 *txbuf,
					const size_t n_tx)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	struct device *dev = &cqspi->pdev->dev;
	void __iomem *reg_base = cqspi->iobase;
	unsigned int remaining = n_tx;
	unsigned int write_bytes;
	int ret;

	if (!refcount_read(&cqspi->refcount))
		return -ENODEV;

	writel(to_addr, reg_base + CQSPI_REG_INDIRECTWRSTARTADDR);
	writel(remaining, reg_base + CQSPI_REG_INDIRECTWRBYTES);

	/* Clear all interrupts. */
	writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);

	writel(CQSPI_IRQ_MASK_WR, reg_base + CQSPI_REG_IRQMASK);

	reinit_completion(&cqspi->transfer_complete);
	writel(CQSPI_REG_INDIRECTWR_START_MASK,
	       reg_base + CQSPI_REG_INDIRECTWR);
	readl(reg_base + CQSPI_REG_INDIRECTWR); /* Flush posted write. */

	/*
	 * As per the 66AK2G02 TRM SPRUHY8F section 11.15.5.3 Indirect Access
	 * Controller programming sequence, a couple of QSPI_REF_CLK cycles
	 * of delay are required for the above bit to be internally
	 * synchronized by the QSPI module. Provide 5 cycles of delay.
	 */
	if (cqspi->wr_delay)
		ndelay(cqspi->wr_delay);

	/*
	 * If a hazard exists between the APB and AHB interfaces, perform a
	 * dummy readback from the controller to ensure synchronization.
	 */
	if (cqspi->apb_ahb_hazard)
		readl(reg_base + CQSPI_REG_INDIRECTWR);

	while (remaining > 0) {
		size_t write_words, mod_bytes;

		write_bytes = remaining;
		write_words = write_bytes / 4;
		mod_bytes = write_bytes % 4;
		/* Write 4 bytes at a time then single bytes. */
		if (write_words) {
			iowrite32_rep(cqspi->ahb_base, txbuf, write_words);
			txbuf += (write_words * 4);
		}
		if (mod_bytes) {
			unsigned int temp = 0xFFFFFFFF;

			memcpy(&temp, txbuf, mod_bytes);
			iowrite32(temp, cqspi->ahb_base);
			txbuf += mod_bytes;
		}

		if (!wait_for_completion_timeout(&cqspi->transfer_complete,
						 msecs_to_jiffies(CQSPI_TIMEOUT_MS))) {
			dev_err(dev, "Indirect write timeout\n");
			ret = -ETIMEDOUT;
			goto failwr;
		}

		remaining -= write_bytes;

		if (remaining > 0)
			reinit_completion(&cqspi->transfer_complete);
	}

	/* Check indirect done status */
	ret = cqspi_wait_for_bit(cqspi->ddata, reg_base + CQSPI_REG_INDIRECTWR,
				 CQSPI_REG_INDIRECTWR_DONE_MASK, 0, false);
	if (ret) {
		dev_err(dev, "Indirect write completion error (%i)\n", ret);
		goto failwr;
	}

	/* Disable interrupt. */
	writel(0, reg_base + CQSPI_REG_IRQMASK);

	/* Clear indirect completion status */
	writel(CQSPI_REG_INDIRECTWR_DONE_MASK, reg_base + CQSPI_REG_INDIRECTWR);

	cqspi_wait_idle(cqspi);

	return 0;

failwr:
	/* Disable interrupt. */
	writel(0, reg_base + CQSPI_REG_IRQMASK);

	/* Cancel the indirect write */
	writel(CQSPI_REG_INDIRECTWR_CANCEL_MASK,
	       reg_base + CQSPI_REG_INDIRECTWR);
	return ret;
}

static void cqspi_chipselect(struct cqspi_flash_pdata *f_pdata)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	unsigned int chip_select = f_pdata->cs;
	unsigned int reg;

	reg = readl(reg_base + CQSPI_REG_CONFIG);
	if (cqspi->is_decoded_cs) {
		reg |= CQSPI_REG_CONFIG_DECODE_MASK;
	} else {
		reg &= ~CQSPI_REG_CONFIG_DECODE_MASK;

		/* Convert CS if without decoder.
		 * CS0 to 4b'1110
		 * CS1 to 4b'1101
		 * CS2 to 4b'1011
		 * CS3 to 4b'0111
		 */
		chip_select = 0xF & ~BIT(chip_select);
	}

	reg &= ~(CQSPI_REG_CONFIG_CHIPSELECT_MASK
		 << CQSPI_REG_CONFIG_CHIPSELECT_LSB);
	reg |= (chip_select & CQSPI_REG_CONFIG_CHIPSELECT_MASK)
		<< CQSPI_REG_CONFIG_CHIPSELECT_LSB;
	writel(reg, reg_base + CQSPI_REG_CONFIG);
}

static unsigned int calculate_ticks_for_ns(const unsigned int ref_clk_hz,
					   const unsigned int ns_val)
{
	unsigned int ticks;

	ticks = ref_clk_hz / 1000; /* kHz */
	ticks = DIV_ROUND_UP(ticks * ns_val, 1000000);

	return ticks;
}
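
/*
 * Example: with a 500 MHz reference clock, a 50 ns delay needs
 * DIV_ROUND_UP(500000 * 50, 1000000) = 25 reference-clock ticks.
 */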

static void cqspi_delay(struct cqspi_flash_pdata *f_pdata)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *iobase = cqspi->iobase;
	const unsigned int ref_clk_hz = cqspi->master_ref_clk_hz;
	unsigned int tshsl, tchsh, tslch, tsd2d;
	unsigned int reg;
	unsigned int tsclk;

	/* calculate the number of ref ticks for one sclk tick */
	tsclk = DIV_ROUND_UP(ref_clk_hz, cqspi->sclk);

	tshsl = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tshsl_ns);
	/* this particular value must be at least one sclk */
	if (tshsl < tsclk)
		tshsl = tsclk;

	tchsh = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tchsh_ns);
	tslch = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tslch_ns);
	tsd2d = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tsd2d_ns);

	reg = (tshsl & CQSPI_REG_DELAY_TSHSL_MASK)
	      << CQSPI_REG_DELAY_TSHSL_LSB;
	reg |= (tchsh & CQSPI_REG_DELAY_TCHSH_MASK)
		<< CQSPI_REG_DELAY_TCHSH_LSB;
	reg |= (tslch & CQSPI_REG_DELAY_TSLCH_MASK)
		<< CQSPI_REG_DELAY_TSLCH_LSB;
	reg |= (tsd2d & CQSPI_REG_DELAY_TSD2D_MASK)
		<< CQSPI_REG_DELAY_TSD2D_LSB;
	writel(reg, iobase + CQSPI_REG_DELAY);
}

static void cqspi_config_baudrate_div(struct cqspi_st *cqspi)
{
	const unsigned int ref_clk_hz = cqspi->master_ref_clk_hz;
	void __iomem *reg_base = cqspi->iobase;
	u32 reg, div;

	/* Recalculate the baudrate divisor based on QSPI specification. */
	div = DIV_ROUND_UP(ref_clk_hz, 2 * cqspi->sclk) - 1;
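	/*
	 * Example: ref_clk_hz = 400 MHz and sclk = 50 MHz gives
	 * div = DIV_ROUND_UP(400, 2 * 50) - 1 = 3, i.e. an actual
	 * SCLK of ref_clk_hz / ((div + 1) * 2) = 50 MHz.
	 */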

	/* Maximum baud divisor */
	if (div > CQSPI_REG_CONFIG_BAUD_MASK) {
		div = CQSPI_REG_CONFIG_BAUD_MASK;
		dev_warn(&cqspi->pdev->dev,
			 "Unable to adjust clock <= %d hz. Reduced to %d hz\n",
			 cqspi->sclk, ref_clk_hz / ((div + 1) * 2));
	}

	reg = readl(reg_base + CQSPI_REG_CONFIG);
	reg &= ~(CQSPI_REG_CONFIG_BAUD_MASK << CQSPI_REG_CONFIG_BAUD_LSB);
	reg |= div << CQSPI_REG_CONFIG_BAUD_LSB;
	writel(reg, reg_base + CQSPI_REG_CONFIG);
}

static void cqspi_readdata_capture(struct cqspi_st *cqspi,
				   const bool bypass,
				   const unsigned int delay)
{
	void __iomem *reg_base = cqspi->iobase;
	unsigned int reg;

	reg = readl(reg_base + CQSPI_REG_READCAPTURE);

	if (bypass)
		reg |= BIT(CQSPI_REG_READCAPTURE_BYPASS_LSB);
	else
		reg &= ~BIT(CQSPI_REG_READCAPTURE_BYPASS_LSB);

	reg &= ~(CQSPI_REG_READCAPTURE_DELAY_MASK
		 << CQSPI_REG_READCAPTURE_DELAY_LSB);

	reg |= (delay & CQSPI_REG_READCAPTURE_DELAY_MASK)
		<< CQSPI_REG_READCAPTURE_DELAY_LSB;

	writel(reg, reg_base + CQSPI_REG_READCAPTURE);
}

static void cqspi_configure(struct cqspi_flash_pdata *f_pdata,
			    unsigned long sclk)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	int switch_cs = (cqspi->current_cs != f_pdata->cs);
	int switch_ck = (cqspi->sclk != sclk);

	if (switch_cs || switch_ck)
		cqspi_controller_enable(cqspi, 0);

	/* Switch chip select. */
	if (switch_cs) {
		cqspi->current_cs = f_pdata->cs;
		cqspi_chipselect(f_pdata);
	}

	/* Setup baudrate divisor and delays */
	if (switch_ck) {
		cqspi->sclk = sclk;
		cqspi_config_baudrate_div(cqspi);
		cqspi_delay(f_pdata);
		cqspi_readdata_capture(cqspi, !cqspi->rclk_en,
				       f_pdata->read_delay);
	}

	if (switch_cs || switch_ck)
		cqspi_controller_enable(cqspi, 1);
}

static ssize_t cqspi_write(struct cqspi_flash_pdata *f_pdata,
			   const struct spi_mem_op *op)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	loff_t to = op->addr.val;
	size_t len = op->data.nbytes;
	const u_char *buf = op->data.buf.out;
	int ret;

	ret = cqspi_write_setup(f_pdata, op);
	if (ret)
		return ret;

	/*
	 * Some flashes like the Cypress Semper flash expect a dummy 4-byte
	 * address (all 0s) with the read status register command in DTR mode.
	 * But this controller does not support sending dummy address bytes to
	 * the flash when it is polling the write completion register in DTR
	 * mode. So, we can not use direct mode when in DTR mode for writing
	 * data.
	 */
	if ((!op->cmd.dtr && cqspi->use_direct_mode &&
	     cqspi->use_direct_mode_wr && ((to + len) <= cqspi->ahb_size)) ||
	    (cqspi->ddata && cqspi->ddata->quirks & CQSPI_NO_INDIRECT_MODE)) {
		memcpy_toio(cqspi->ahb_base + to, buf, len);
		return cqspi_wait_idle(cqspi);
	}

	return cqspi_indirect_write_execute(f_pdata, to, buf, len);
}

static void cqspi_rx_dma_callback(void *param)
{
	struct cqspi_st *cqspi = param;

	complete(&cqspi->rx_dma_complete);
}

static int cqspi_direct_read_execute(struct cqspi_flash_pdata *f_pdata,
				     u_char *buf, loff_t from, size_t len)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	struct device *dev = &cqspi->pdev->dev;
	enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
	dma_addr_t dma_src = (dma_addr_t)cqspi->mmap_phys_base + from;
	int ret = 0;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;
	dma_addr_t dma_dst;
	struct device *ddev;

	if (!cqspi->rx_chan || !virt_addr_valid(buf)) {
		memcpy_fromio(buf, cqspi->ahb_base + from, len);
		return 0;
	}

	ddev = cqspi->rx_chan->device->dev;
	dma_dst = dma_map_single(ddev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(ddev, dma_dst)) {
		dev_err(dev, "dma mapping failed\n");
		return -ENOMEM;
	}
	tx = dmaengine_prep_dma_memcpy(cqspi->rx_chan, dma_dst, dma_src,
				       len, flags);
	if (!tx) {
		dev_err(dev, "device_prep_dma_memcpy error\n");
		ret = -EIO;
		goto err_unmap;
	}

	tx->callback = cqspi_rx_dma_callback;
	tx->callback_param = cqspi;
	cookie = tx->tx_submit(tx);
	reinit_completion(&cqspi->rx_dma_complete);

	ret = dma_submit_error(cookie);
	if (ret) {
		dev_err(dev, "dma_submit_error %d\n", cookie);
		ret = -EIO;
		goto err_unmap;
	}

	dma_async_issue_pending(cqspi->rx_chan);
	if (!wait_for_completion_timeout(&cqspi->rx_dma_complete,
					 msecs_to_jiffies(max_t(size_t, len, 500)))) {
		dmaengine_terminate_sync(cqspi->rx_chan);
		dev_err(dev, "DMA wait_for_completion_timeout\n");
		ret = -ETIMEDOUT;
		goto err_unmap;
	}

err_unmap:
	dma_unmap_single(ddev, dma_dst, len, DMA_FROM_DEVICE);

	return ret;
}

static ssize_t cqspi_read(struct cqspi_flash_pdata *f_pdata,
			  const struct spi_mem_op *op)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	const struct cqspi_driver_platdata *ddata = cqspi->ddata;
	loff_t from = op->addr.val;
	size_t len = op->data.nbytes;
	u_char *buf = op->data.buf.in;
	u64 dma_align = (u64)(uintptr_t)buf;
	int ret;

	ret = cqspi_read_setup(f_pdata, op);
	if (ret)
		return ret;

	if ((cqspi->use_direct_mode && ((from + len) <= cqspi->ahb_size)) ||
	    (cqspi->ddata && cqspi->ddata->quirks & CQSPI_NO_INDIRECT_MODE))
		return cqspi_direct_read_execute(f_pdata, buf, from, len);

	if (cqspi->use_dma_read && ddata && ddata->indirect_read_dma &&
	    virt_addr_valid(buf) && ((dma_align & CQSPI_DMA_UNALIGN) == 0))
		return ddata->indirect_read_dma(f_pdata, buf, from, len);

	return cqspi_indirect_read_execute(f_pdata, buf, from, len);
}

static int cqspi_mem_process(struct spi_mem *mem, const struct spi_mem_op *op)
{
	struct cqspi_st *cqspi = spi_controller_get_devdata(mem->spi->controller);
	struct cqspi_flash_pdata *f_pdata;

	f_pdata = &cqspi->f_pdata[spi_get_chipselect(mem->spi, 0)];
	cqspi_configure(f_pdata, op->max_freq);

	if (op->data.dir == SPI_MEM_DATA_IN && op->data.buf.in) {
		/*
		 * Reads in DAC mode are a minimum of 4 bytes, which some
		 * flash devices do not support during register reads;
		 * prefer STIG mode for such small reads.
		 */
		if (!op->addr.nbytes ||
		    (op->data.nbytes <= CQSPI_STIG_DATA_LEN_MAX &&
		     !cqspi->disable_stig_mode))
			return cqspi_command_read(f_pdata, op);

		return cqspi_read(f_pdata, op);
	}

	if (!op->addr.nbytes || !op->data.buf.out)
		return cqspi_command_write(f_pdata, op);

	return cqspi_write(f_pdata, op);
}

static int cqspi_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	int ret;
	struct cqspi_st *cqspi = spi_controller_get_devdata(mem->spi->controller);
	struct device *dev = &cqspi->pdev->dev;
	const struct cqspi_driver_platdata *ddata = of_device_get_match_data(dev);

	if (refcount_read(&cqspi->inflight_ops) == 0)
		return -ENODEV;

	if (!(ddata && (ddata->quirks & CQSPI_DISABLE_RUNTIME_PM))) {
		ret = pm_runtime_resume_and_get(dev);
		if (ret) {
			dev_err(&mem->spi->dev, "resume failed with %d\n", ret);
			return ret;
		}
	}

	if (!refcount_read(&cqspi->refcount))
		return -EBUSY;

	refcount_inc(&cqspi->inflight_ops);

	if (!refcount_read(&cqspi->refcount)) {
		if (refcount_read(&cqspi->inflight_ops))
			refcount_dec(&cqspi->inflight_ops);
		return -EBUSY;
	}

	ret = cqspi_mem_process(mem, op);

	if (!(ddata && (ddata->quirks & CQSPI_DISABLE_RUNTIME_PM)))
		pm_runtime_put_autosuspend(dev);

	if (ret)
		dev_err(&mem->spi->dev, "operation failed with %d\n", ret);

	if (refcount_read(&cqspi->inflight_ops) > 1)
		refcount_dec(&cqspi->inflight_ops);

	return ret;
}

static bool cqspi_supports_mem_op(struct spi_mem *mem,
				  const struct spi_mem_op *op)
{
	struct cqspi_st *cqspi = spi_controller_get_devdata(mem->spi->controller);
	bool all_true, all_false;

	/*
	 * op->dummy.dtr is required for converting nbytes into ncycles.
	 * Also, don't check the dtr field of the op phase having zero nbytes.
	 */
	all_true = op->cmd.dtr &&
		   (!op->addr.nbytes || op->addr.dtr) &&
		   (!op->dummy.nbytes || op->dummy.dtr) &&
		   (!op->data.nbytes || op->data.dtr);

	all_false = !op->cmd.dtr && !op->addr.dtr && !op->dummy.dtr &&
		    !op->data.dtr;

	if (all_true) {
		/* Right now we only support 8-8-8 DTR mode. */
		if (op->cmd.nbytes && op->cmd.buswidth != 8)
			return false;
		if (op->addr.nbytes && op->addr.buswidth != 8)
			return false;
		if (op->data.nbytes && op->data.buswidth != 8)
			return false;

		/* A single opcode is supported, it will be repeated */
		if ((op->cmd.opcode >> 8) != (op->cmd.opcode & 0xFF))
			return false;

		if (cqspi->is_rzn1)
			return false;
	} else if (!all_false) {
		/* Mixed DTR modes are not supported. */
		return false;
	}

	return spi_mem_default_supports_op(mem, op);
}

static int cqspi_of_get_flash_pdata(struct platform_device *pdev,
				    struct cqspi_flash_pdata *f_pdata,
				    struct device_node *np)
{
	if (of_property_read_u32(np, "cdns,read-delay", &f_pdata->read_delay)) {
		dev_err(&pdev->dev, "couldn't determine read-delay\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "cdns,tshsl-ns", &f_pdata->tshsl_ns)) {
		dev_err(&pdev->dev, "couldn't determine tshsl-ns\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "cdns,tsd2d-ns", &f_pdata->tsd2d_ns)) {
		dev_err(&pdev->dev, "couldn't determine tsd2d-ns\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "cdns,tchsh-ns", &f_pdata->tchsh_ns)) {
		dev_err(&pdev->dev, "couldn't determine tchsh-ns\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "cdns,tslch-ns", &f_pdata->tslch_ns)) {
		dev_err(&pdev->dev, "couldn't determine tslch-ns\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "spi-max-frequency", &f_pdata->clk_rate)) {
		dev_err(&pdev->dev, "couldn't determine spi-max-frequency\n");
		return -ENXIO;
	}

	return 0;
}

static int cqspi_of_get_pdata(struct cqspi_st *cqspi)
{
	struct device *dev = &cqspi->pdev->dev;
	struct device_node *np = dev->of_node;
	u32 id[2];

	cqspi->is_decoded_cs = of_property_read_bool(np, "cdns,is-decoded-cs");

	if (!(cqspi->ddata && cqspi->ddata->quirks & CQSPI_NO_INDIRECT_MODE)) {
		if (of_property_read_u32(np, "cdns,fifo-depth", &cqspi->fifo_depth)) {
			/* Zero signals FIFO depth should be runtime detected. */
			cqspi->fifo_depth = 0;
		}

		if (of_property_read_u32(np, "cdns,fifo-width", &cqspi->fifo_width))
			cqspi->fifo_width = 4;

		if (of_property_read_u32(np, "cdns,trigger-address",
					 &cqspi->trigger_address)) {
			dev_err(dev, "couldn't determine trigger-address\n");
			return -ENXIO;
		}
	}

	if (of_property_read_u32(np, "num-cs", &cqspi->num_chipselect))
		cqspi->num_chipselect = CQSPI_MAX_CHIPSELECT;

	cqspi->rclk_en = of_property_read_bool(np, "cdns,rclk-en");

	if (!of_property_read_u32_array(np, "power-domains", id,
					ARRAY_SIZE(id)))
		cqspi->pd_dev_id = id[1];

	return 0;
}

static void cqspi_controller_init(struct cqspi_st *cqspi)
{
	u32 reg;

	/* Configure the remap address register, no remap */
	writel(0, cqspi->iobase + CQSPI_REG_REMAP);

	/* Disable all interrupts. */
	writel(0, cqspi->iobase + CQSPI_REG_IRQMASK);

	if (!(cqspi->ddata && cqspi->ddata->quirks & CQSPI_NO_INDIRECT_MODE)) {
		/* Configure the SRAM split to 1:1. */
		writel(cqspi->fifo_depth / 2, cqspi->iobase + CQSPI_REG_SRAMPARTITION);
		/* Load indirect trigger address. */
		writel(cqspi->trigger_address,
		       cqspi->iobase + CQSPI_REG_INDIRECTTRIGGER);

		/* Program read watermark -- 1/2 of the FIFO. */
		writel(cqspi->fifo_depth * cqspi->fifo_width / 2,
		       cqspi->iobase + CQSPI_REG_INDIRECTRDWATERMARK);
		/* Program write watermark -- 1/8 of the FIFO. */
		writel(cqspi->fifo_depth * cqspi->fifo_width / 8,
		       cqspi->iobase + CQSPI_REG_INDIRECTWRWATERMARK);
	}

	/* Disable write protection at controller level */
	if (cqspi->ddata && cqspi->ddata->quirks & CQSPI_HAS_WR_PROTECT)
		writel(0, cqspi->iobase + CQSPI_REG_WR_PROT_CTRL);

	/* Disable direct access controller */
	if (!cqspi->use_direct_mode) {
		reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
		reg &= ~CQSPI_REG_CONFIG_ENB_DIR_ACC_CTRL;
		writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);
	}

	/* Enable DMA interface */
	if (cqspi->use_dma_read) {
		reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
		reg |= CQSPI_REG_CONFIG_DMA_MASK;
		writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);
	}
}

static void cqspi_controller_detect_fifo_depth(struct cqspi_st *cqspi)
{
	struct device *dev = &cqspi->pdev->dev;
	u32 reg, fifo_depth;

	if (cqspi->ddata && cqspi->ddata->quirks & CQSPI_NO_INDIRECT_MODE)
		return;

	/*
	 * Bits N-1:0 are writable while bits 31:N are read as zero, with 2^N
	 * the FIFO depth.
	 */
	writel(U32_MAX, cqspi->iobase + CQSPI_REG_SRAMPARTITION);
	reg = readl(cqspi->iobase + CQSPI_REG_SRAMPARTITION);
	fifo_depth = reg + 1;
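	/*
	 * Example: reading back 0x7F after writing U32_MAX means N = 7
	 * writable bits, i.e. a FIFO depth of reg + 1 = 128 entries.
	 */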

	/* FIFO depth of zero means no value from devicetree was provided. */
	if (cqspi->fifo_depth == 0) {
		cqspi->fifo_depth = fifo_depth;
		dev_dbg(dev, "using FIFO depth of %u\n", fifo_depth);
	} else if (fifo_depth != cqspi->fifo_depth) {
		dev_warn(dev, "detected FIFO depth (%u) different from config (%u)\n",
			 fifo_depth, cqspi->fifo_depth);
	}
}

static int cqspi_request_mmap_dma(struct cqspi_st *cqspi)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	cqspi->rx_chan = dma_request_chan_by_mask(&mask);
	if (IS_ERR(cqspi->rx_chan)) {
		int ret = PTR_ERR(cqspi->rx_chan);

		cqspi->rx_chan = NULL;
		if (ret == -ENODEV) {
			/* DMA support is not mandatory */
			dev_info(&cqspi->pdev->dev, "No Rx DMA available\n");
			return 0;
		}

		return dev_err_probe(&cqspi->pdev->dev, ret, "No Rx DMA available\n");
	}
	init_completion(&cqspi->rx_dma_complete);

	return 0;
}

static const char *cqspi_get_name(struct spi_mem *mem)
{
	struct cqspi_st *cqspi = spi_controller_get_devdata(mem->spi->controller);
	struct device *dev = &cqspi->pdev->dev;

	return devm_kasprintf(dev, GFP_KERNEL, "%s.%d", dev_name(dev),
			      spi_get_chipselect(mem->spi, 0));
}

static const struct spi_controller_mem_ops cqspi_mem_ops = {
	.exec_op = cqspi_exec_mem_op,
	.get_name = cqspi_get_name,
	.supports_op = cqspi_supports_mem_op,
};

static const struct spi_controller_mem_caps cqspi_mem_caps = {
	.dtr = true,
	.per_op_freq = true,
};

static int cqspi_setup_flash(struct cqspi_st *cqspi)
{
	struct platform_device *pdev = cqspi->pdev;
	struct device *dev = &pdev->dev;
	struct cqspi_flash_pdata *f_pdata;
	unsigned int cs;
	int ret, max_cs = -1;

	/* Get flash device data */
	for_each_available_child_of_node_scoped(dev->of_node, np) {
		ret = of_property_read_u32(np, "reg", &cs);
		if (ret) {
			dev_err(dev, "Couldn't determine chip select.\n");
			return ret;
		}

		if (cs >= cqspi->num_chipselect) {
			dev_err(dev, "Chip select %d out of range.\n", cs);
			return -EINVAL;
		}

		max_cs = max_t(int, cs, max_cs);

		f_pdata = &cqspi->f_pdata[cs];
		f_pdata->cqspi = cqspi;
		f_pdata->cs = cs;

		ret = cqspi_of_get_flash_pdata(pdev, f_pdata, np);
		if (ret)
			return ret;
	}

	if (max_cs < 0) {
		dev_err(dev, "No flash device declared\n");
		return -ENODEV;
	}

	cqspi->num_chipselect = max_cs + 1;
	return 0;
}
1787
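/*
 * Illustrative devicetree fragment consumed by cqspi_setup_flash() above.
 * Property names follow the cdns,qspi-nor binding; the values are examples
 * only:
 *
 *	flash@0 {
 *		compatible = "jedec,spi-nor";
 *		reg = <0>;
 *		spi-max-frequency = <50000000>;
 *		cdns,read-delay = <2>;
 *		cdns,tshsl-ns = <60>;
 *		cdns,tsd2d-ns = <60>;
 *		cdns,tchsh-ns = <60>;
 *		cdns,tslch-ns = <60>;
 *	};
 */
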
static int cqspi_probe(struct platform_device *pdev)
{
	const struct cqspi_driver_platdata *ddata;
	struct reset_control *rstc, *rstc_ocp, *rstc_ref;
	struct device *dev = &pdev->dev;
	struct spi_controller *host;
	struct resource *res_ahb;
	struct cqspi_st *cqspi;
	int ret, irq;

	host = devm_spi_alloc_host(&pdev->dev, sizeof(*cqspi));
	if (!host)
		return -ENOMEM;

	host->mode_bits = SPI_RX_QUAD | SPI_RX_DUAL;
	host->mem_ops = &cqspi_mem_ops;
	host->mem_caps = &cqspi_mem_caps;

	cqspi = spi_controller_get_devdata(host);
	if (of_device_is_compatible(pdev->dev.of_node, "starfive,jh7110-qspi"))
		cqspi->is_jh7110 = true;
	if (of_device_is_compatible(pdev->dev.of_node, "renesas,rzn1-qspi"))
		cqspi->is_rzn1 = true;

	cqspi->pdev = pdev;
	cqspi->host = host;
	cqspi->ddata = ddata = of_device_get_match_data(dev);
	platform_set_drvdata(pdev, cqspi);

	/* Obtain configuration from OF. */
	ret = cqspi_of_get_pdata(cqspi);
	if (ret) {
		dev_err(dev, "Cannot get mandatory OF data.\n");
		return -ENODEV;
	}

	ret = cqspi_setup_flash(cqspi);
	if (ret) {
		dev_err(dev, "failed to setup flash parameters %d\n", ret);
		return ret;
	}

	/* Obtain QSPI clocks. */
	memcpy(&cqspi->clks, &cqspi_clks, sizeof(cqspi->clks));
	ret = devm_clk_bulk_get_optional(dev, CLK_QSPI_NUM, cqspi->clks);
	if (ret)
		return dev_err_probe(dev, ret, "Failed to get clocks\n");

	if (!cqspi->clks[CLK_QSPI_REF].clk) {
		dev_err(dev, "Cannot claim mandatory QSPI ref clock.\n");
		return -ENODEV;
	}

	/* Obtain and remap controller address. */
	cqspi->iobase = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(cqspi->iobase)) {
		dev_err(dev, "Cannot remap controller address.\n");
		ret = PTR_ERR(cqspi->iobase);
		return ret;
	}

	/* Obtain and remap AHB address. */
	cqspi->ahb_base = devm_platform_get_and_ioremap_resource(pdev, 1, &res_ahb);
	if (IS_ERR(cqspi->ahb_base)) {
		dev_err(dev, "Cannot remap AHB address.\n");
		ret = PTR_ERR(cqspi->ahb_base);
		return ret;
	}
	cqspi->mmap_phys_base = (dma_addr_t)res_ahb->start;
	cqspi->ahb_size = resource_size(res_ahb);

	init_completion(&cqspi->transfer_complete);

	/* Obtain IRQ line. */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return -ENXIO;

	ret = pm_runtime_set_active(dev);
	if (ret)
		return ret;

	ret = clk_bulk_prepare_enable(CLK_QSPI_NUM, cqspi->clks);
	if (ret) {
		dev_err(dev, "Cannot enable QSPI clocks.\n");
		goto disable_rpm;
	}

	/* Obtain QSPI reset control */
	rstc = devm_reset_control_get_optional_exclusive(dev, "qspi");
	if (IS_ERR(rstc)) {
		ret = PTR_ERR(rstc);
		dev_err(dev, "Cannot get QSPI reset.\n");
		goto disable_clks;
	}

	rstc_ocp = devm_reset_control_get_optional_exclusive(dev, "qspi-ocp");
	if (IS_ERR(rstc_ocp)) {
		ret = PTR_ERR(rstc_ocp);
		dev_err(dev, "Cannot get QSPI OCP reset.\n");
		goto disable_clks;
	}

	if (cqspi->is_jh7110) {
		rstc_ref = devm_reset_control_get_optional_exclusive(dev, "rstc_ref");
		if (IS_ERR(rstc_ref)) {
			ret = PTR_ERR(rstc_ref);
			dev_err(dev, "Cannot get QSPI REF reset.\n");
			goto disable_clks;
		}
		reset_control_assert(rstc_ref);
		reset_control_deassert(rstc_ref);
	}

	reset_control_assert(rstc);
	reset_control_deassert(rstc);

	reset_control_assert(rstc_ocp);
	reset_control_deassert(rstc_ocp);

	cqspi->master_ref_clk_hz = clk_get_rate(cqspi->clks[CLK_QSPI_REF].clk);
	if (!cqspi->is_rzn1) {
		host->max_speed_hz = cqspi->master_ref_clk_hz;
	} else {
		host->max_speed_hz = cqspi->master_ref_clk_hz / 2;
		host->min_speed_hz = cqspi->master_ref_clk_hz / 32;
	}
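	/*
	 * Illustrative: with a 100 MHz ref clock, the RZ/N1 branch above
	 * limits SCLK to the range 3.125 MHz (ref / 32) .. 50 MHz (ref / 2).
	 */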

	/* write completion is supported by default */
	cqspi->wr_completion = true;

	if (ddata) {
		if (ddata->quirks & CQSPI_NEEDS_WR_DELAY)
			cqspi->wr_delay = 50 * DIV_ROUND_UP(NSEC_PER_SEC,
							    cqspi->master_ref_clk_hz);
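		/*
		 * The delay above is ~50 ref-clock periods expressed in ns.
		 * Illustrative: at a 500 MHz ref clock (2 ns per cycle) it
		 * evaluates to 50 * 2 = 100 ns.
		 */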
		if (ddata->hwcaps_mask & CQSPI_SUPPORTS_OCTAL)
			host->mode_bits |= SPI_RX_OCTAL | SPI_TX_OCTAL;
		if (ddata->hwcaps_mask & CQSPI_SUPPORTS_QUAD)
			host->mode_bits |= SPI_TX_QUAD;
		if (!(ddata->quirks & CQSPI_DISABLE_DAC_MODE)) {
			cqspi->use_direct_mode = true;
			cqspi->use_direct_mode_wr = true;
		}
		if (ddata->quirks & CQSPI_SUPPORT_EXTERNAL_DMA)
			cqspi->use_dma_read = true;
		if (ddata->quirks & CQSPI_NO_SUPPORT_WR_COMPLETION)
			cqspi->wr_completion = false;
		if (ddata->quirks & CQSPI_SLOW_SRAM)
			cqspi->slow_sram = true;
		if (ddata->quirks & CQSPI_NEEDS_APB_AHB_HAZARD_WAR)
			cqspi->apb_ahb_hazard = true;
		if (ddata->quirks & CQSPI_DISABLE_STIG_MODE)
			cqspi->disable_stig_mode = true;

		if (ddata->quirks & CQSPI_DMA_SET_MASK) {
			ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
			if (ret)
				goto disable_clks;
		}
	}

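	/*
	 * refcount gates new operations once teardown has begun, while
	 * inflight_ops lets cqspi_remove() wait for any operation still in
	 * flight before the controller is disabled (see cqspi_remove()).
	 */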
	refcount_set(&cqspi->refcount, 1);
	refcount_set(&cqspi->inflight_ops, 1);

	ret = devm_request_irq(dev, irq, cqspi_irq_handler, 0,
			       pdev->name, cqspi);
	if (ret) {
		dev_err(dev, "Cannot request IRQ.\n");
		goto disable_clks;
	}

	cqspi_wait_idle(cqspi);
	cqspi_controller_enable(cqspi, 0);
	cqspi_controller_detect_fifo_depth(cqspi);
	cqspi_controller_init(cqspi);
	cqspi_controller_enable(cqspi, 1);
	cqspi->current_cs = -1;
	cqspi->sclk = 0;

	if (!(ddata && (ddata->quirks & CQSPI_DISABLE_RUNTIME_PM))) {
		pm_runtime_enable(dev);
		pm_runtime_set_autosuspend_delay(dev, CQSPI_AUTOSUSPEND_TIMEOUT);
		pm_runtime_use_autosuspend(dev);
		pm_runtime_get_noresume(dev);
	}

	host->num_chipselect = cqspi->num_chipselect;

	if (ddata && (ddata->quirks & CQSPI_SUPPORT_DEVICE_RESET))
		cqspi_device_reset(cqspi);

	if (cqspi->use_direct_mode && !cqspi->is_rzn1) {
		ret = cqspi_request_mmap_dma(cqspi);
		if (ret == -EPROBE_DEFER) {
			dev_err_probe(&pdev->dev, ret, "Failed to request mmap DMA\n");
			goto disable_controller;
		}
	}

	ret = spi_register_controller(host);
	if (ret) {
		dev_err(&pdev->dev, "failed to register SPI ctlr %d\n", ret);
		goto release_dma_chan;
	}

	if (!(ddata && (ddata->quirks & CQSPI_DISABLE_RUNTIME_PM)))
		pm_runtime_put_autosuspend(dev);

	return 0;

release_dma_chan:
	if (cqspi->rx_chan)
		dma_release_channel(cqspi->rx_chan);
disable_controller:
	cqspi_controller_enable(cqspi, 0);
disable_clks:
	if (pm_runtime_get_sync(&pdev->dev) >= 0)
		clk_bulk_disable_unprepare(CLK_QSPI_NUM, cqspi->clks);
disable_rpm:
	if (!(ddata && (ddata->quirks & CQSPI_DISABLE_RUNTIME_PM)))
		pm_runtime_disable(dev);

	return ret;
}

static void cqspi_remove(struct platform_device *pdev)
{
	const struct cqspi_driver_platdata *ddata;
	struct cqspi_st *cqspi = platform_get_drvdata(pdev);
	struct device *dev = &pdev->dev;
	int ret = 0;

	ddata = of_device_get_match_data(dev);

	refcount_set(&cqspi->refcount, 0);

	if (!refcount_dec_and_test(&cqspi->inflight_ops))
		cqspi_wait_idle(cqspi);

	spi_unregister_controller(cqspi->host);

	if (cqspi->rx_chan)
		dma_release_channel(cqspi->rx_chan);

	cqspi_controller_enable(cqspi, 0);

	if (!(ddata && (ddata->quirks & CQSPI_DISABLE_RUNTIME_PM)))
		ret = pm_runtime_get_sync(&pdev->dev);

	if (ret >= 0)
		clk_bulk_disable_unprepare(CLK_QSPI_NUM, cqspi->clks);

	if (!(ddata && (ddata->quirks & CQSPI_DISABLE_RUNTIME_PM))) {
		pm_runtime_put_sync(&pdev->dev);
		pm_runtime_disable(&pdev->dev);
	}
}

static int cqspi_runtime_suspend(struct device *dev)
{
	struct cqspi_st *cqspi = dev_get_drvdata(dev);

	cqspi_controller_enable(cqspi, 0);
	clk_bulk_disable_unprepare(CLK_QSPI_NUM, cqspi->clks);
	return 0;
}

static int cqspi_runtime_resume(struct device *dev)
{
	struct cqspi_st *cqspi = dev_get_drvdata(dev);
	int ret;

	ret = clk_bulk_prepare_enable(CLK_QSPI_NUM, cqspi->clks);
	if (ret)
		return ret;

	cqspi_wait_idle(cqspi);
	cqspi_controller_enable(cqspi, 0);
	cqspi_controller_init(cqspi);
	cqspi_controller_enable(cqspi, 1);

	cqspi->current_cs = -1;
	cqspi->sclk = 0;
	return 0;
}

static int cqspi_suspend(struct device *dev)
{
	struct cqspi_st *cqspi = dev_get_drvdata(dev);
	int ret;

	ret = spi_controller_suspend(cqspi->host);
	if (ret)
		return ret;

	return pm_runtime_force_suspend(dev);
}

static int cqspi_resume(struct device *dev)
{
	struct cqspi_st *cqspi = dev_get_drvdata(dev);
	int ret;

	ret = pm_runtime_force_resume(dev);
	if (ret) {
		dev_err(dev, "pm_runtime_force_resume failed on resume\n");
		return ret;
	}

	return spi_controller_resume(cqspi->host);
}

static const struct dev_pm_ops cqspi_dev_pm_ops = {
	RUNTIME_PM_OPS(cqspi_runtime_suspend, cqspi_runtime_resume, NULL)
	SYSTEM_SLEEP_PM_OPS(cqspi_suspend, cqspi_resume)
};

static const struct cqspi_driver_platdata cdns_qspi = {
	.quirks = CQSPI_DISABLE_DAC_MODE,
};

static const struct cqspi_driver_platdata k2g_qspi = {
	.quirks = CQSPI_NEEDS_WR_DELAY,
};

static const struct cqspi_driver_platdata am654_ospi = {
	.hwcaps_mask = CQSPI_SUPPORTS_OCTAL | CQSPI_SUPPORTS_QUAD,
	.quirks = CQSPI_NEEDS_WR_DELAY,
};

static const struct cqspi_driver_platdata intel_lgm_qspi = {
	.quirks = CQSPI_DISABLE_DAC_MODE,
};

static const struct cqspi_driver_platdata socfpga_qspi = {
	.quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_NO_SUPPORT_WR_COMPLETION |
		  CQSPI_SLOW_SRAM | CQSPI_DISABLE_STIG_MODE |
		  CQSPI_DISABLE_RUNTIME_PM,
};

static const struct cqspi_driver_platdata versal_ospi = {
	.hwcaps_mask = CQSPI_SUPPORTS_OCTAL,
	.quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_SUPPORT_EXTERNAL_DMA |
		  CQSPI_DMA_SET_MASK,
	.indirect_read_dma = cqspi_versal_indirect_read_dma,
	.get_dma_status = cqspi_get_versal_dma_status,
};

static const struct cqspi_driver_platdata versal2_ospi = {
	.hwcaps_mask = CQSPI_SUPPORTS_OCTAL,
	.quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_SUPPORT_EXTERNAL_DMA |
		  CQSPI_DMA_SET_MASK | CQSPI_SUPPORT_DEVICE_RESET,
	.indirect_read_dma = cqspi_versal_indirect_read_dma,
	.get_dma_status = cqspi_get_versal_dma_status,
};

static const struct cqspi_driver_platdata jh7110_qspi = {
	.quirks = CQSPI_DISABLE_DAC_MODE,
};

static const struct cqspi_driver_platdata pensando_cdns_qspi = {
	.quirks = CQSPI_NEEDS_APB_AHB_HAZARD_WAR | CQSPI_DISABLE_DAC_MODE,
};

static const struct cqspi_driver_platdata mobileye_eyeq5_ospi = {
	.hwcaps_mask = CQSPI_SUPPORTS_OCTAL,
	.quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_NO_SUPPORT_WR_COMPLETION |
		  CQSPI_RD_NO_IRQ,
};

static const struct cqspi_driver_platdata renesas_rzn1_qspi = {
	.hwcaps_mask = CQSPI_SUPPORTS_QUAD,
	.quirks = CQSPI_NO_SUPPORT_WR_COMPLETION | CQSPI_RD_NO_IRQ |
		  CQSPI_HAS_WR_PROTECT | CQSPI_NO_INDIRECT_MODE,
};

static const struct of_device_id cqspi_dt_ids[] = {
	{
		.compatible = "cdns,qspi-nor",
		.data = &cdns_qspi,
	},
	{
		.compatible = "ti,k2g-qspi",
		.data = &k2g_qspi,
	},
	{
		.compatible = "ti,am654-ospi",
		.data = &am654_ospi,
	},
	{
		.compatible = "intel,lgm-qspi",
		.data = &intel_lgm_qspi,
	},
	{
		.compatible = "xlnx,versal-ospi-1.0",
		.data = &versal_ospi,
	},
	{
		.compatible = "intel,socfpga-qspi",
		.data = &socfpga_qspi,
	},
	{
		.compatible = "starfive,jh7110-qspi",
		.data = &jh7110_qspi,
	},
	{
		.compatible = "amd,pensando-elba-qspi",
		.data = &pensando_cdns_qspi,
	},
	{
		.compatible = "mobileye,eyeq5-ospi",
		.data = &mobileye_eyeq5_ospi,
	},
	{
		.compatible = "amd,versal2-ospi",
		.data = &versal2_ospi,
	},
	{
		.compatible = "renesas,rzn1-qspi",
		.data = &renesas_rzn1_qspi,
	},
	{ /* end of table */ }
};

MODULE_DEVICE_TABLE(of, cqspi_dt_ids);
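
/*
 * Illustrative controller node matched by cqspi_dt_ids[] above. Property
 * names follow the cdns,qspi-nor binding; addresses and values are
 * examples only:
 *
 *	spi@ff705000 {
 *		compatible = "cdns,qspi-nor";
 *		#address-cells = <1>;
 *		#size-cells = <0>;
 *		reg = <0xff705000 0x1000>,	// controller registers
 *		      <0xffa00000 0x1000>;	// AHB data window
 *		interrupts = <0 151 4>;
 *		clocks = <&qspi_clk>;
 *		cdns,fifo-depth = <128>;
 *		cdns,fifo-width = <4>;
 *		cdns,trigger-address = <0x00000000>;
 *	};
 */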

static struct platform_driver cqspi_platform_driver = {
	.probe = cqspi_probe,
	.remove = cqspi_remove,
	.driver = {
		.name = CQSPI_NAME,
		.pm = pm_ptr(&cqspi_dev_pm_ops),
		.of_match_table = cqspi_dt_ids,
	},
};

module_platform_driver(cqspi_platform_driver);

MODULE_DESCRIPTION("Cadence QSPI Controller Driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" CQSPI_NAME);
MODULE_AUTHOR("Ley Foon Tan <lftan@altera.com>");
MODULE_AUTHOR("Graham Moore <grmoore@opensource.altera.com>");
MODULE_AUTHOR("Vadivel Murugan R <vadivel.muruganx.ramuthevar@intel.com>");
MODULE_AUTHOR("Vignesh Raghavendra <vigneshr@ti.com>");
MODULE_AUTHOR("Pratyush Yadav <p.yadav@ti.com>");