// SPDX-License-Identifier: GPL-2.0-only
//
// Driver for Cadence QSPI Controller
//
// Copyright Altera Corporation (C) 2012-2014. All rights reserved.
// Copyright Intel Corporation (C) 2019-2020. All rights reserved.
// Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/firmware/xlnx-zynqmp.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/sched.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/timer.h>

#define CQSPI_NAME			"cadence-qspi"
#define CQSPI_MAX_CHIPSELECT		16

/* Quirks */
#define CQSPI_NEEDS_WR_DELAY		BIT(0)
#define CQSPI_DISABLE_DAC_MODE		BIT(1)
#define CQSPI_SUPPORT_EXTERNAL_DMA	BIT(2)
#define CQSPI_NO_SUPPORT_WR_COMPLETION	BIT(3)

/* Capabilities */
#define CQSPI_SUPPORTS_OCTAL		BIT(0)

struct cqspi_st;

struct cqspi_flash_pdata {
	struct cqspi_st	*cqspi;
	u32		clk_rate;
	u32		read_delay;
	u32		tshsl_ns;
	u32		tsd2d_ns;
	u32		tchsh_ns;
	u32		tslch_ns;
	u8		inst_width;
	u8		addr_width;
	u8		data_width;
	bool		dtr;
	u8		cs;
};

struct cqspi_st {
	struct platform_device	*pdev;

	struct clk		*clk;
	unsigned int		sclk;

	void __iomem		*iobase;
	void __iomem		*ahb_base;
	resource_size_t		ahb_size;
	struct completion	transfer_complete;

	struct dma_chan		*rx_chan;
	struct completion	rx_dma_complete;
	dma_addr_t		mmap_phys_base;

	int			current_cs;
	unsigned long		master_ref_clk_hz;
	bool			is_decoded_cs;
	u32			fifo_depth;
	u32			fifo_width;
	u32			num_chipselect;
	bool			rclk_en;
	u32			trigger_address;
	u32			wr_delay;
	bool			use_direct_mode;
	struct cqspi_flash_pdata f_pdata[CQSPI_MAX_CHIPSELECT];
	bool			use_dma_read;
	u32			pd_dev_id;
	bool			wr_completion;
};

struct cqspi_driver_platdata {
	u32 hwcaps_mask;
	u8 quirks;
	int (*indirect_read_dma)(struct cqspi_flash_pdata *f_pdata,
				 u_char *rxbuf, loff_t from_addr, size_t n_rx);
	u32 (*get_dma_status)(struct cqspi_st *cqspi);
};

/* Operation timeout value */
#define CQSPI_TIMEOUT_MS			500
#define CQSPI_READ_TIMEOUT_MS			10

/* Instruction type */
#define CQSPI_INST_TYPE_SINGLE			0
#define CQSPI_INST_TYPE_DUAL			1
#define CQSPI_INST_TYPE_QUAD			2
#define CQSPI_INST_TYPE_OCTAL			3

#define CQSPI_DUMMY_CLKS_PER_BYTE		8
#define CQSPI_DUMMY_BYTES_MAX			4
#define CQSPI_DUMMY_CLKS_MAX			31

#define CQSPI_STIG_DATA_LEN_MAX			8

/* Register map */
#define CQSPI_REG_CONFIG			0x00
#define CQSPI_REG_CONFIG_ENABLE_MASK		BIT(0)
#define CQSPI_REG_CONFIG_ENB_DIR_ACC_CTRL	BIT(7)
#define CQSPI_REG_CONFIG_DECODE_MASK		BIT(9)
#define CQSPI_REG_CONFIG_CHIPSELECT_LSB		10
#define CQSPI_REG_CONFIG_DMA_MASK		BIT(15)
#define CQSPI_REG_CONFIG_BAUD_LSB		19
#define CQSPI_REG_CONFIG_DTR_PROTO		BIT(24)
#define CQSPI_REG_CONFIG_DUAL_OPCODE		BIT(30)
#define CQSPI_REG_CONFIG_IDLE_LSB		31
#define CQSPI_REG_CONFIG_CHIPSELECT_MASK	0xF
#define CQSPI_REG_CONFIG_BAUD_MASK		0xF

#define CQSPI_REG_RD_INSTR			0x04
#define CQSPI_REG_RD_INSTR_OPCODE_LSB		0
#define CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB	8
#define CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB	12
#define CQSPI_REG_RD_INSTR_TYPE_DATA_LSB	16
#define CQSPI_REG_RD_INSTR_MODE_EN_LSB		20
#define CQSPI_REG_RD_INSTR_DUMMY_LSB		24
#define CQSPI_REG_RD_INSTR_TYPE_INSTR_MASK	0x3
#define CQSPI_REG_RD_INSTR_TYPE_ADDR_MASK	0x3
#define CQSPI_REG_RD_INSTR_TYPE_DATA_MASK	0x3
#define CQSPI_REG_RD_INSTR_DUMMY_MASK		0x1F

#define CQSPI_REG_WR_INSTR			0x08
#define CQSPI_REG_WR_INSTR_OPCODE_LSB		0
#define CQSPI_REG_WR_INSTR_TYPE_ADDR_LSB	12
#define CQSPI_REG_WR_INSTR_TYPE_DATA_LSB	16

#define CQSPI_REG_DELAY				0x0C
#define CQSPI_REG_DELAY_TSLCH_LSB		0
#define CQSPI_REG_DELAY_TCHSH_LSB		8
#define CQSPI_REG_DELAY_TSD2D_LSB		16
#define CQSPI_REG_DELAY_TSHSL_LSB		24
#define CQSPI_REG_DELAY_TSLCH_MASK		0xFF
#define CQSPI_REG_DELAY_TCHSH_MASK		0xFF
#define CQSPI_REG_DELAY_TSD2D_MASK		0xFF
#define CQSPI_REG_DELAY_TSHSL_MASK		0xFF

#define CQSPI_REG_READCAPTURE			0x10
#define CQSPI_REG_READCAPTURE_BYPASS_LSB	0
#define CQSPI_REG_READCAPTURE_DELAY_LSB		1
#define CQSPI_REG_READCAPTURE_DELAY_MASK	0xF

#define CQSPI_REG_SIZE				0x14
#define CQSPI_REG_SIZE_ADDRESS_LSB		0
#define CQSPI_REG_SIZE_PAGE_LSB			4
#define CQSPI_REG_SIZE_BLOCK_LSB		16
#define CQSPI_REG_SIZE_ADDRESS_MASK		0xF
#define CQSPI_REG_SIZE_PAGE_MASK		0xFFF
#define CQSPI_REG_SIZE_BLOCK_MASK		0x3F

#define CQSPI_REG_SRAMPARTITION			0x18
#define CQSPI_REG_INDIRECTTRIGGER		0x1C

#define CQSPI_REG_DMA				0x20
#define CQSPI_REG_DMA_SINGLE_LSB		0
#define CQSPI_REG_DMA_BURST_LSB			8
#define CQSPI_REG_DMA_SINGLE_MASK		0xFF
#define CQSPI_REG_DMA_BURST_MASK		0xFF

#define CQSPI_REG_REMAP				0x24
#define CQSPI_REG_MODE_BIT			0x28

#define CQSPI_REG_SDRAMLEVEL			0x2C
#define CQSPI_REG_SDRAMLEVEL_RD_LSB		0
#define CQSPI_REG_SDRAMLEVEL_WR_LSB		16
#define CQSPI_REG_SDRAMLEVEL_RD_MASK		0xFFFF
#define CQSPI_REG_SDRAMLEVEL_WR_MASK		0xFFFF

#define CQSPI_REG_WR_COMPLETION_CTRL		0x38
#define CQSPI_REG_WR_DISABLE_AUTO_POLL		BIT(14)

#define CQSPI_REG_IRQSTATUS			0x40
#define CQSPI_REG_IRQMASK			0x44

#define CQSPI_REG_INDIRECTRD			0x60
#define CQSPI_REG_INDIRECTRD_START_MASK		BIT(0)
#define CQSPI_REG_INDIRECTRD_CANCEL_MASK	BIT(1)
#define CQSPI_REG_INDIRECTRD_DONE_MASK		BIT(5)

#define CQSPI_REG_INDIRECTRDWATERMARK		0x64
#define CQSPI_REG_INDIRECTRDSTARTADDR		0x68
#define CQSPI_REG_INDIRECTRDBYTES		0x6C

#define CQSPI_REG_CMDCTRL			0x90
#define CQSPI_REG_CMDCTRL_EXECUTE_MASK		BIT(0)
#define CQSPI_REG_CMDCTRL_INPROGRESS_MASK	BIT(1)
#define CQSPI_REG_CMDCTRL_DUMMY_LSB		7
#define CQSPI_REG_CMDCTRL_WR_BYTES_LSB		12
#define CQSPI_REG_CMDCTRL_WR_EN_LSB		15
#define CQSPI_REG_CMDCTRL_ADD_BYTES_LSB		16
#define CQSPI_REG_CMDCTRL_ADDR_EN_LSB		19
#define CQSPI_REG_CMDCTRL_RD_BYTES_LSB		20
#define CQSPI_REG_CMDCTRL_RD_EN_LSB		23
#define CQSPI_REG_CMDCTRL_OPCODE_LSB		24
#define CQSPI_REG_CMDCTRL_WR_BYTES_MASK		0x7
#define CQSPI_REG_CMDCTRL_ADD_BYTES_MASK	0x3
#define CQSPI_REG_CMDCTRL_RD_BYTES_MASK		0x7
#define CQSPI_REG_CMDCTRL_DUMMY_MASK		0x1F

#define CQSPI_REG_INDIRECTWR			0x70
#define CQSPI_REG_INDIRECTWR_START_MASK		BIT(0)
#define CQSPI_REG_INDIRECTWR_CANCEL_MASK	BIT(1)
#define CQSPI_REG_INDIRECTWR_DONE_MASK		BIT(5)

#define CQSPI_REG_INDIRECTWRWATERMARK		0x74
#define CQSPI_REG_INDIRECTWRSTARTADDR		0x78
#define CQSPI_REG_INDIRECTWRBYTES		0x7C

#define CQSPI_REG_INDTRIG_ADDRRANGE		0x80

#define CQSPI_REG_CMDADDRESS			0x94
#define CQSPI_REG_CMDREADDATALOWER		0xA0
#define CQSPI_REG_CMDREADDATAUPPER		0xA4
#define CQSPI_REG_CMDWRITEDATALOWER		0xA8
#define CQSPI_REG_CMDWRITEDATAUPPER		0xAC

#define CQSPI_REG_POLLING_STATUS		0xB0
#define CQSPI_REG_POLLING_STATUS_DUMMY_LSB	16

#define CQSPI_REG_OP_EXT_LOWER			0xE0
#define CQSPI_REG_OP_EXT_READ_LSB		24
#define CQSPI_REG_OP_EXT_WRITE_LSB		16
#define CQSPI_REG_OP_EXT_STIG_LSB		0

#define CQSPI_REG_VERSAL_DMA_SRC_ADDR		0x1000

#define CQSPI_REG_VERSAL_DMA_DST_ADDR		0x1800
#define CQSPI_REG_VERSAL_DMA_DST_SIZE		0x1804

#define CQSPI_REG_VERSAL_DMA_DST_CTRL		0x180C

#define CQSPI_REG_VERSAL_DMA_DST_I_STS		0x1814
#define CQSPI_REG_VERSAL_DMA_DST_I_EN		0x1818
#define CQSPI_REG_VERSAL_DMA_DST_I_DIS		0x181C
#define CQSPI_REG_VERSAL_DMA_DST_DONE_MASK	BIT(1)

#define CQSPI_REG_VERSAL_DMA_DST_ADDR_MSB	0x1828

#define CQSPI_REG_VERSAL_DMA_DST_CTRL_VAL	0xF43FFA00
#define CQSPI_REG_VERSAL_ADDRRANGE_WIDTH_VAL	0x6

/* Interrupt status bits */
#define CQSPI_REG_IRQ_MODE_ERR			BIT(0)
#define CQSPI_REG_IRQ_UNDERFLOW			BIT(1)
#define CQSPI_REG_IRQ_IND_COMP			BIT(2)
#define CQSPI_REG_IRQ_IND_RD_REJECT		BIT(3)
#define CQSPI_REG_IRQ_WR_PROTECTED_ERR		BIT(4)
#define CQSPI_REG_IRQ_ILLEGAL_AHB_ERR		BIT(5)
#define CQSPI_REG_IRQ_WATERMARK			BIT(6)
#define CQSPI_REG_IRQ_IND_SRAM_FULL		BIT(12)

#define CQSPI_IRQ_MASK_RD		(CQSPI_REG_IRQ_WATERMARK	| \
					 CQSPI_REG_IRQ_IND_SRAM_FULL	| \
					 CQSPI_REG_IRQ_IND_COMP)

#define CQSPI_IRQ_MASK_WR		(CQSPI_REG_IRQ_IND_COMP		| \
					 CQSPI_REG_IRQ_WATERMARK	| \
					 CQSPI_REG_IRQ_UNDERFLOW)

#define CQSPI_IRQ_STATUS_MASK		0x1FFFF
#define CQSPI_DMA_UNALIGN		0x3

#define CQSPI_REG_VERSAL_DMA_VAL		0x602

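/*
 * cqspi_wait_for_bit - wait for the masked bits of a register to settle
 * @reg: register to poll
 * @mask: bits of interest
 * @clr: wait for the bits to be cleared when true, to be set when false
 *
 * Polls every 10 us, for up to CQSPI_TIMEOUT_MS. Returns 0 on success and
 * -ETIMEDOUT otherwise.
 */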
static int cqspi_wait_for_bit(void __iomem *reg, const u32 mask, bool clr)
{
	u32 val;

	return readl_relaxed_poll_timeout(reg, val,
					  (((clr ? ~val : val) & mask) == mask),
					  10, CQSPI_TIMEOUT_MS * 1000);
}

static bool cqspi_is_idle(struct cqspi_st *cqspi)
{
	u32 reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);

	return reg & (1UL << CQSPI_REG_CONFIG_IDLE_LSB);
}

static u32 cqspi_get_rd_sram_level(struct cqspi_st *cqspi)
{
	u32 reg = readl(cqspi->iobase + CQSPI_REG_SDRAMLEVEL);

	reg >>= CQSPI_REG_SDRAMLEVEL_RD_LSB;
	return reg & CQSPI_REG_SDRAMLEVEL_RD_MASK;
}

static u32 cqspi_get_versal_dma_status(struct cqspi_st *cqspi)
{
	u32 dma_status;

	dma_status = readl(cqspi->iobase +
			   CQSPI_REG_VERSAL_DMA_DST_I_STS);
	writel(dma_status, cqspi->iobase +
	       CQSPI_REG_VERSAL_DMA_DST_I_STS);

	return dma_status & CQSPI_REG_VERSAL_DMA_DST_DONE_MASK;
}

static irqreturn_t cqspi_irq_handler(int this_irq, void *dev)
{
	struct cqspi_st *cqspi = dev;
	unsigned int irq_status;
	struct device *device = &cqspi->pdev->dev;
	const struct cqspi_driver_platdata *ddata;

	ddata = of_device_get_match_data(device);

	/* Read interrupt status */
	irq_status = readl(cqspi->iobase + CQSPI_REG_IRQSTATUS);

	/* Clear interrupt */
	writel(irq_status, cqspi->iobase + CQSPI_REG_IRQSTATUS);

	if (cqspi->use_dma_read && ddata && ddata->get_dma_status) {
		if (ddata->get_dma_status(cqspi)) {
			complete(&cqspi->transfer_complete);
			return IRQ_HANDLED;
		}
	}

	irq_status &= CQSPI_IRQ_MASK_RD | CQSPI_IRQ_MASK_WR;

	if (irq_status)
		complete(&cqspi->transfer_complete);

	return IRQ_HANDLED;
}

static unsigned int cqspi_calc_rdreg(struct cqspi_flash_pdata *f_pdata)
{
	u32 rdreg = 0;

	rdreg |= f_pdata->inst_width << CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB;
	rdreg |= f_pdata->addr_width << CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB;
	rdreg |= f_pdata->data_width << CQSPI_REG_RD_INSTR_TYPE_DATA_LSB;

	return rdreg;
}

static unsigned int cqspi_calc_dummy(const struct spi_mem_op *op, bool dtr)
{
	unsigned int dummy_clk;

	if (!op->dummy.nbytes)
		return 0;

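	/*
	 * Each dummy byte takes 8 / buswidth clock cycles, e.g. 4 dummy
	 * bytes on an 8-bit bus take 4 cycles. DTR clocks data on both
	 * edges, so the cycle count is halved.
	 */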
	dummy_clk = op->dummy.nbytes * (8 / op->dummy.buswidth);
	if (dtr)
		dummy_clk /= 2;

	return dummy_clk;
}
375  
376  static int cqspi_set_protocol(struct cqspi_flash_pdata *f_pdata,
377  			      const struct spi_mem_op *op)
378  {
379  	f_pdata->inst_width = CQSPI_INST_TYPE_SINGLE;
380  	f_pdata->addr_width = CQSPI_INST_TYPE_SINGLE;
381  	f_pdata->data_width = CQSPI_INST_TYPE_SINGLE;
382  
383  	/*
384  	 * For an op to be DTR, cmd phase along with every other non-empty
385  	 * phase should have dtr field set to 1. If an op phase has zero
386  	 * nbytes, ignore its dtr field; otherwise, check its dtr field.
387  	 */
388  	f_pdata->dtr = op->cmd.dtr &&
389  		       (!op->addr.nbytes || op->addr.dtr) &&
390  		       (!op->data.nbytes || op->data.dtr);

	switch (op->data.buswidth) {
	case 0:
		break;
	case 1:
		f_pdata->data_width = CQSPI_INST_TYPE_SINGLE;
		break;
	case 2:
		f_pdata->data_width = CQSPI_INST_TYPE_DUAL;
		break;
	case 4:
		f_pdata->data_width = CQSPI_INST_TYPE_QUAD;
		break;
	case 8:
		f_pdata->data_width = CQSPI_INST_TYPE_OCTAL;
		break;
	default:
		return -EINVAL;
	}

	/* Right now we only support 8-8-8 DTR mode. */
	if (f_pdata->dtr) {
		switch (op->cmd.buswidth) {
		case 0:
			break;
		case 8:
			f_pdata->inst_width = CQSPI_INST_TYPE_OCTAL;
			break;
		default:
			return -EINVAL;
		}

		switch (op->addr.buswidth) {
		case 0:
			break;
		case 8:
			f_pdata->addr_width = CQSPI_INST_TYPE_OCTAL;
			break;
		default:
			return -EINVAL;
		}

		switch (op->data.buswidth) {
		case 0:
			break;
		case 8:
			f_pdata->data_width = CQSPI_INST_TYPE_OCTAL;
			break;
		default:
			return -EINVAL;
		}
	}

	return 0;
}

static int cqspi_wait_idle(struct cqspi_st *cqspi)
{
	const unsigned int poll_idle_retry = 3;
	unsigned int count = 0;
	unsigned long timeout;

	timeout = jiffies + msecs_to_jiffies(CQSPI_TIMEOUT_MS);
	while (1) {
		/*
		 * Read a few times in succession to ensure the controller
		 * is indeed idle, that is, the bit does not transition
		 * low again.
		 */
		if (cqspi_is_idle(cqspi))
			count++;
		else
			count = 0;

		if (count >= poll_idle_retry)
			return 0;

		if (time_after(jiffies, timeout)) {
			/* Timeout, still in busy mode. */
			dev_err(&cqspi->pdev->dev,
				"QSPI is still busy after %dms timeout.\n",
				CQSPI_TIMEOUT_MS);
			return -ETIMEDOUT;
		}

		cpu_relax();
	}
}

static int cqspi_exec_flash_cmd(struct cqspi_st *cqspi, unsigned int reg)
{
	void __iomem *reg_base = cqspi->iobase;
	int ret;

	/* Write the CMDCTRL without starting execution. */
	writel(reg, reg_base + CQSPI_REG_CMDCTRL);
	/* Start execution. */
	reg |= CQSPI_REG_CMDCTRL_EXECUTE_MASK;
	writel(reg, reg_base + CQSPI_REG_CMDCTRL);

	/* Poll for completion. */
	ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_CMDCTRL,
				 CQSPI_REG_CMDCTRL_INPROGRESS_MASK, 1);
	if (ret) {
		dev_err(&cqspi->pdev->dev,
			"Flash command execution timed out.\n");
		return ret;
	}

	/* Poll QSPI idle status. */
	return cqspi_wait_idle(cqspi);
}

static int cqspi_setup_opcode_ext(struct cqspi_flash_pdata *f_pdata,
				  const struct spi_mem_op *op,
				  unsigned int shift)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	unsigned int reg;
	u8 ext;

	if (op->cmd.nbytes != 2)
		return -EINVAL;

	/*
	 * The opcode extension is the LSB. E.g. for the 2-byte opcode
	 * 0x05fa, the command is 0x05 and the extension 0xfa (either a
	 * repeat or the inverse of the command, depending on the flash).
	 */
	ext = op->cmd.opcode & 0xff;

	reg = readl(reg_base + CQSPI_REG_OP_EXT_LOWER);
	reg &= ~(0xff << shift);
	reg |= ext << shift;
	writel(reg, reg_base + CQSPI_REG_OP_EXT_LOWER);

	return 0;
}

static int cqspi_enable_dtr(struct cqspi_flash_pdata *f_pdata,
			    const struct spi_mem_op *op, unsigned int shift,
			    bool enable)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	unsigned int reg;
	int ret;

	reg = readl(reg_base + CQSPI_REG_CONFIG);

	/*
	 * We enable the dual byte opcode here. The callers have to set up
	 * the extension opcode based on the type of operation.
	 */
	if (enable) {
		reg |= CQSPI_REG_CONFIG_DTR_PROTO;
		reg |= CQSPI_REG_CONFIG_DUAL_OPCODE;

		/* Set up command opcode extension. */
		ret = cqspi_setup_opcode_ext(f_pdata, op, shift);
		if (ret)
			return ret;
	} else {
		reg &= ~CQSPI_REG_CONFIG_DTR_PROTO;
		reg &= ~CQSPI_REG_CONFIG_DUAL_OPCODE;
	}

	writel(reg, reg_base + CQSPI_REG_CONFIG);

	return cqspi_wait_idle(cqspi);
}

static int cqspi_command_read(struct cqspi_flash_pdata *f_pdata,
			      const struct spi_mem_op *op)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	u8 *rxbuf = op->data.buf.in;
	u8 opcode;
	size_t n_rx = op->data.nbytes;
	unsigned int rdreg;
	unsigned int reg;
	unsigned int dummy_clk;
	size_t read_len;
	int status;

	status = cqspi_set_protocol(f_pdata, op);
	if (status)
		return status;

	status = cqspi_enable_dtr(f_pdata, op, CQSPI_REG_OP_EXT_STIG_LSB,
				  f_pdata->dtr);
	if (status)
		return status;

	if (!n_rx || n_rx > CQSPI_STIG_DATA_LEN_MAX || !rxbuf) {
		dev_err(&cqspi->pdev->dev,
			"Invalid input argument, len %zu rxbuf 0x%p\n",
			n_rx, rxbuf);
		return -EINVAL;
	}

	if (f_pdata->dtr)
		opcode = op->cmd.opcode >> 8;
	else
		opcode = op->cmd.opcode;

	reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;

	rdreg = cqspi_calc_rdreg(f_pdata);
	writel(rdreg, reg_base + CQSPI_REG_RD_INSTR);

	dummy_clk = cqspi_calc_dummy(op, f_pdata->dtr);
	if (dummy_clk > CQSPI_DUMMY_CLKS_MAX)
		return -EOPNOTSUPP;

	if (dummy_clk)
		reg |= (dummy_clk & CQSPI_REG_CMDCTRL_DUMMY_MASK)
		     << CQSPI_REG_CMDCTRL_DUMMY_LSB;

	reg |= (0x1 << CQSPI_REG_CMDCTRL_RD_EN_LSB);

	/* 0 means 1 byte. */
	reg |= (((n_rx - 1) & CQSPI_REG_CMDCTRL_RD_BYTES_MASK)
		<< CQSPI_REG_CMDCTRL_RD_BYTES_LSB);
	status = cqspi_exec_flash_cmd(cqspi, reg);
	if (status)
		return status;

	reg = readl(reg_base + CQSPI_REG_CMDREADDATALOWER);

	/* Put the read value into rx_buf */
	read_len = (n_rx > 4) ? 4 : n_rx;
	memcpy(rxbuf, &reg, read_len);
	rxbuf += read_len;

	if (n_rx > 4) {
		reg = readl(reg_base + CQSPI_REG_CMDREADDATAUPPER);

		read_len = n_rx - read_len;
		memcpy(rxbuf, &reg, read_len);
	}

	return 0;
}

static int cqspi_command_write(struct cqspi_flash_pdata *f_pdata,
			       const struct spi_mem_op *op)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	u8 opcode;
	const u8 *txbuf = op->data.buf.out;
	size_t n_tx = op->data.nbytes;
	unsigned int reg;
	unsigned int data;
	size_t write_len;
	int ret;

	ret = cqspi_set_protocol(f_pdata, op);
	if (ret)
		return ret;

	ret = cqspi_enable_dtr(f_pdata, op, CQSPI_REG_OP_EXT_STIG_LSB,
			       f_pdata->dtr);
	if (ret)
		return ret;

	if (n_tx > CQSPI_STIG_DATA_LEN_MAX || (n_tx && !txbuf)) {
		dev_err(&cqspi->pdev->dev,
			"Invalid input argument, cmdlen %zu txbuf 0x%p\n",
			n_tx, txbuf);
		return -EINVAL;
	}

	reg = cqspi_calc_rdreg(f_pdata);
	writel(reg, reg_base + CQSPI_REG_RD_INSTR);

	if (f_pdata->dtr)
		opcode = op->cmd.opcode >> 8;
	else
		opcode = op->cmd.opcode;

	reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;

	if (op->addr.nbytes) {
		reg |= (0x1 << CQSPI_REG_CMDCTRL_ADDR_EN_LSB);
		reg |= ((op->addr.nbytes - 1) &
			CQSPI_REG_CMDCTRL_ADD_BYTES_MASK)
			<< CQSPI_REG_CMDCTRL_ADD_BYTES_LSB;

		writel(op->addr.val, reg_base + CQSPI_REG_CMDADDRESS);
	}

	if (n_tx) {
		reg |= (0x1 << CQSPI_REG_CMDCTRL_WR_EN_LSB);
		reg |= ((n_tx - 1) & CQSPI_REG_CMDCTRL_WR_BYTES_MASK)
			<< CQSPI_REG_CMDCTRL_WR_BYTES_LSB;
		data = 0;
		write_len = (n_tx > 4) ? 4 : n_tx;
		memcpy(&data, txbuf, write_len);
		txbuf += write_len;
		writel(data, reg_base + CQSPI_REG_CMDWRITEDATALOWER);

		if (n_tx > 4) {
			data = 0;
			write_len = n_tx - 4;
			memcpy(&data, txbuf, write_len);
			writel(data, reg_base + CQSPI_REG_CMDWRITEDATAUPPER);
		}
	}

	return cqspi_exec_flash_cmd(cqspi, reg);
}

static int cqspi_read_setup(struct cqspi_flash_pdata *f_pdata,
			    const struct spi_mem_op *op)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	unsigned int dummy_clk = 0;
	unsigned int reg;
	int ret;
	u8 opcode;

	ret = cqspi_enable_dtr(f_pdata, op, CQSPI_REG_OP_EXT_READ_LSB,
			       f_pdata->dtr);
	if (ret)
		return ret;

	if (f_pdata->dtr)
		opcode = op->cmd.opcode >> 8;
	else
		opcode = op->cmd.opcode;

	reg = opcode << CQSPI_REG_RD_INSTR_OPCODE_LSB;
	reg |= cqspi_calc_rdreg(f_pdata);

	/* Setup dummy clock cycles */
	dummy_clk = cqspi_calc_dummy(op, f_pdata->dtr);

	if (dummy_clk > CQSPI_DUMMY_CLKS_MAX)
		return -EOPNOTSUPP;

	if (dummy_clk)
		reg |= (dummy_clk & CQSPI_REG_RD_INSTR_DUMMY_MASK)
		       << CQSPI_REG_RD_INSTR_DUMMY_LSB;

	writel(reg, reg_base + CQSPI_REG_RD_INSTR);

	/* Set address width */
	reg = readl(reg_base + CQSPI_REG_SIZE);
	reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
	reg |= (op->addr.nbytes - 1);
	writel(reg, reg_base + CQSPI_REG_SIZE);
	return 0;
}

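/*
 * Drain an indirect read through the controller's internal SRAM: start the
 * transfer, then each time the watermark/completion interrupt fires, copy
 * whatever the read FIFO holds out through the AHB trigger window, until
 * all n_rx bytes have been fetched.
 */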
static int cqspi_indirect_read_execute(struct cqspi_flash_pdata *f_pdata,
				       u8 *rxbuf, loff_t from_addr,
				       const size_t n_rx)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	struct device *dev = &cqspi->pdev->dev;
	void __iomem *reg_base = cqspi->iobase;
	void __iomem *ahb_base = cqspi->ahb_base;
	unsigned int remaining = n_rx;
	unsigned int mod_bytes = n_rx % 4;
	unsigned int bytes_to_read = 0;
	u8 *rxbuf_end = rxbuf + n_rx;
	int ret = 0;

	writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR);
	writel(remaining, reg_base + CQSPI_REG_INDIRECTRDBYTES);

	/* Clear all interrupts. */
	writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);

	writel(CQSPI_IRQ_MASK_RD, reg_base + CQSPI_REG_IRQMASK);

	reinit_completion(&cqspi->transfer_complete);
	writel(CQSPI_REG_INDIRECTRD_START_MASK,
	       reg_base + CQSPI_REG_INDIRECTRD);

	while (remaining > 0) {
		if (!wait_for_completion_timeout(&cqspi->transfer_complete,
						 msecs_to_jiffies(CQSPI_READ_TIMEOUT_MS)))
			ret = -ETIMEDOUT;

		bytes_to_read = cqspi_get_rd_sram_level(cqspi);

		if (ret && bytes_to_read == 0) {
			dev_err(dev, "Indirect read timeout, no bytes\n");
			goto failrd;
		}

		while (bytes_to_read != 0) {
			unsigned int word_remain = round_down(remaining, 4);

			bytes_to_read *= cqspi->fifo_width;
			bytes_to_read = bytes_to_read > remaining ?
					remaining : bytes_to_read;
			bytes_to_read = round_down(bytes_to_read, 4);
			/* Read 4 byte word chunks then single bytes */
			if (bytes_to_read) {
				ioread32_rep(ahb_base, rxbuf,
					     (bytes_to_read / 4));
			} else if (!word_remain && mod_bytes) {
				unsigned int temp = ioread32(ahb_base);

				bytes_to_read = mod_bytes;
				memcpy(rxbuf, &temp, min((unsigned int)
							 (rxbuf_end - rxbuf),
							 bytes_to_read));
			}
			rxbuf += bytes_to_read;
			remaining -= bytes_to_read;
			bytes_to_read = cqspi_get_rd_sram_level(cqspi);
		}

		if (remaining > 0)
			reinit_completion(&cqspi->transfer_complete);
	}

	/* Check indirect done status */
	ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_INDIRECTRD,
				 CQSPI_REG_INDIRECTRD_DONE_MASK, 0);
	if (ret) {
		dev_err(dev, "Indirect read completion error (%i)\n", ret);
		goto failrd;
	}

	/* Disable interrupt */
	writel(0, reg_base + CQSPI_REG_IRQMASK);

	/* Clear indirect completion status */
	writel(CQSPI_REG_INDIRECTRD_DONE_MASK, reg_base + CQSPI_REG_INDIRECTRD);

	return 0;

failrd:
	/* Disable interrupt */
	writel(0, reg_base + CQSPI_REG_IRQMASK);

	/* Cancel the indirect read */
	writel(CQSPI_REG_INDIRECTRD_CANCEL_MASK,
	       reg_base + CQSPI_REG_INDIRECTRD);
	return ret;
}

static int cqspi_versal_indirect_read_dma(struct cqspi_flash_pdata *f_pdata,
					  u_char *rxbuf, loff_t from_addr,
					  size_t n_rx)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	struct device *dev = &cqspi->pdev->dev;
	void __iomem *reg_base = cqspi->iobase;
	u32 reg, bytes_to_dma;
	loff_t addr = from_addr;
	void *buf = rxbuf;
	dma_addr_t dma_addr;
	u8 bytes_rem;
	int ret = 0;

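	/*
	 * The DMA engine moves whole 32-bit words only: transfer the
	 * 4-byte-aligned part by DMA and pick up the remaining 1-3 bytes
	 * via a plain indirect read at the nondmard label below.
	 */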
	bytes_rem = n_rx % 4;
	bytes_to_dma = (n_rx - bytes_rem);

	if (!bytes_to_dma)
		goto nondmard;

	ret = zynqmp_pm_ospi_mux_select(cqspi->pd_dev_id, PM_OSPI_MUX_SEL_DMA);
	if (ret)
		return ret;

	reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
	reg |= CQSPI_REG_CONFIG_DMA_MASK;
	writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);

	dma_addr = dma_map_single(dev, rxbuf, bytes_to_dma, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		dev_err(dev, "dma mapping failed\n");
		return -ENOMEM;
	}

	writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR);
	writel(bytes_to_dma, reg_base + CQSPI_REG_INDIRECTRDBYTES);
	writel(CQSPI_REG_VERSAL_ADDRRANGE_WIDTH_VAL,
	       reg_base + CQSPI_REG_INDTRIG_ADDRRANGE);

	/* Clear all interrupts. */
	writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);

	/* Enable DMA done interrupt */
	writel(CQSPI_REG_VERSAL_DMA_DST_DONE_MASK,
	       reg_base + CQSPI_REG_VERSAL_DMA_DST_I_EN);

	/* Default DMA periph configuration */
	writel(CQSPI_REG_VERSAL_DMA_VAL, reg_base + CQSPI_REG_DMA);

	/* Configure DMA Dst address */
	writel(lower_32_bits(dma_addr),
	       reg_base + CQSPI_REG_VERSAL_DMA_DST_ADDR);
	writel(upper_32_bits(dma_addr),
	       reg_base + CQSPI_REG_VERSAL_DMA_DST_ADDR_MSB);

	/* Configure DMA Src address */
	writel(cqspi->trigger_address, reg_base +
	       CQSPI_REG_VERSAL_DMA_SRC_ADDR);

	/* Set DMA destination size */
	writel(bytes_to_dma, reg_base + CQSPI_REG_VERSAL_DMA_DST_SIZE);

	/* Set DMA destination control */
	writel(CQSPI_REG_VERSAL_DMA_DST_CTRL_VAL,
	       reg_base + CQSPI_REG_VERSAL_DMA_DST_CTRL);

	/* Re-arm the completion before starting, so the IRQ is not lost. */
	reinit_completion(&cqspi->transfer_complete);

	writel(CQSPI_REG_INDIRECTRD_START_MASK,
	       reg_base + CQSPI_REG_INDIRECTRD);

	if (!wait_for_completion_timeout(&cqspi->transfer_complete,
					 msecs_to_jiffies(CQSPI_READ_TIMEOUT_MS))) {
		ret = -ETIMEDOUT;
		goto failrd;
	}

	/* Disable DMA interrupt */
	writel(0x0, cqspi->iobase + CQSPI_REG_VERSAL_DMA_DST_I_DIS);

	/* Clear indirect completion status */
	writel(CQSPI_REG_INDIRECTRD_DONE_MASK,
	       cqspi->iobase + CQSPI_REG_INDIRECTRD);
	dma_unmap_single(dev, dma_addr, bytes_to_dma, DMA_FROM_DEVICE);

	reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
	reg &= ~CQSPI_REG_CONFIG_DMA_MASK;
	writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);

	ret = zynqmp_pm_ospi_mux_select(cqspi->pd_dev_id,
					PM_OSPI_MUX_SEL_LINEAR);
	if (ret)
		return ret;

nondmard:
	if (bytes_rem) {
		addr += bytes_to_dma;
		buf += bytes_to_dma;
		ret = cqspi_indirect_read_execute(f_pdata, buf, addr,
						  bytes_rem);
		if (ret)
			return ret;
	}

	return 0;

failrd:
	/* Disable DMA interrupt */
	writel(0x0, reg_base + CQSPI_REG_VERSAL_DMA_DST_I_DIS);

	/* Cancel the indirect read */
	writel(CQSPI_REG_INDIRECTRD_CANCEL_MASK,
	       reg_base + CQSPI_REG_INDIRECTRD);

	dma_unmap_single(dev, dma_addr, bytes_to_dma, DMA_FROM_DEVICE);

	reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
	reg &= ~CQSPI_REG_CONFIG_DMA_MASK;
	writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);

	zynqmp_pm_ospi_mux_select(cqspi->pd_dev_id, PM_OSPI_MUX_SEL_LINEAR);

	return ret;
}

static int cqspi_write_setup(struct cqspi_flash_pdata *f_pdata,
			     const struct spi_mem_op *op)
{
	unsigned int reg;
	int ret;
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	u8 opcode;

	ret = cqspi_enable_dtr(f_pdata, op, CQSPI_REG_OP_EXT_WRITE_LSB,
			       f_pdata->dtr);
	if (ret)
		return ret;

	if (f_pdata->dtr)
		opcode = op->cmd.opcode >> 8;
	else
		opcode = op->cmd.opcode;

	/* Set opcode. */
	reg = opcode << CQSPI_REG_WR_INSTR_OPCODE_LSB;
	reg |= f_pdata->data_width << CQSPI_REG_WR_INSTR_TYPE_DATA_LSB;
	reg |= f_pdata->addr_width << CQSPI_REG_WR_INSTR_TYPE_ADDR_LSB;
	writel(reg, reg_base + CQSPI_REG_WR_INSTR);
	reg = cqspi_calc_rdreg(f_pdata);
	writel(reg, reg_base + CQSPI_REG_RD_INSTR);

	/*
	 * SPI NAND flashes require the address of the status register to be
	 * passed in the Read SR command. Also, some SPI NOR flashes like the
	 * Cypress Semper flash expect a 4-byte dummy address in the Read SR
	 * command in DTR mode.
	 *
	 * But this controller does not support an address phase in the Read
	 * SR command when doing auto-HW polling. So, disable write completion
	 * polling on the controller's side. spinand and spi-nor will take
	 * care of polling the status register.
	 */
	if (cqspi->wr_completion) {
		reg = readl(reg_base + CQSPI_REG_WR_COMPLETION_CTRL);
		reg |= CQSPI_REG_WR_DISABLE_AUTO_POLL;
		writel(reg, reg_base + CQSPI_REG_WR_COMPLETION_CTRL);
	}

	reg = readl(reg_base + CQSPI_REG_SIZE);
	reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
	reg |= (op->addr.nbytes - 1);
	writel(reg, reg_base + CQSPI_REG_SIZE);
	return 0;
}

static int cqspi_indirect_write_execute(struct cqspi_flash_pdata *f_pdata,
					loff_t to_addr, const u8 *txbuf,
					const size_t n_tx)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	struct device *dev = &cqspi->pdev->dev;
	void __iomem *reg_base = cqspi->iobase;
	unsigned int remaining = n_tx;
	unsigned int write_bytes;
	int ret;

	writel(to_addr, reg_base + CQSPI_REG_INDIRECTWRSTARTADDR);
	writel(remaining, reg_base + CQSPI_REG_INDIRECTWRBYTES);

	/* Clear all interrupts. */
	writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);

	writel(CQSPI_IRQ_MASK_WR, reg_base + CQSPI_REG_IRQMASK);

	reinit_completion(&cqspi->transfer_complete);
	writel(CQSPI_REG_INDIRECTWR_START_MASK,
	       reg_base + CQSPI_REG_INDIRECTWR);
	/*
	 * As per 66AK2G02 TRM SPRUHY8F section 11.15.5.3 Indirect Access
	 * Controller programming sequence, a couple of cycles of
	 * QSPI_REF_CLK delay is required for the above bit to
	 * be internally synchronized by the QSPI module. Provide 5
	 * cycles of delay.
	 */
	if (cqspi->wr_delay)
		ndelay(cqspi->wr_delay);

	while (remaining > 0) {
		size_t write_words, mod_bytes;

		write_bytes = remaining;
		write_words = write_bytes / 4;
		mod_bytes = write_bytes % 4;
		/* Write 4 bytes at a time then single bytes. */
		if (write_words) {
			iowrite32_rep(cqspi->ahb_base, txbuf, write_words);
			txbuf += (write_words * 4);
		}
		if (mod_bytes) {
			unsigned int temp = 0xFFFFFFFF;

			memcpy(&temp, txbuf, mod_bytes);
			iowrite32(temp, cqspi->ahb_base);
			txbuf += mod_bytes;
		}

		if (!wait_for_completion_timeout(&cqspi->transfer_complete,
						 msecs_to_jiffies(CQSPI_TIMEOUT_MS))) {
			dev_err(dev, "Indirect write timeout\n");
			ret = -ETIMEDOUT;
			goto failwr;
		}

		remaining -= write_bytes;

		if (remaining > 0)
			reinit_completion(&cqspi->transfer_complete);
	}

	/* Check indirect done status */
	ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_INDIRECTWR,
				 CQSPI_REG_INDIRECTWR_DONE_MASK, 0);
	if (ret) {
		dev_err(dev, "Indirect write completion error (%i)\n", ret);
		goto failwr;
	}

	/* Disable interrupt. */
	writel(0, reg_base + CQSPI_REG_IRQMASK);

	/* Clear indirect completion status */
	writel(CQSPI_REG_INDIRECTWR_DONE_MASK, reg_base + CQSPI_REG_INDIRECTWR);

	cqspi_wait_idle(cqspi);

	return 0;

failwr:
	/* Disable interrupt. */
	writel(0, reg_base + CQSPI_REG_IRQMASK);

	/* Cancel the indirect write */
	writel(CQSPI_REG_INDIRECTWR_CANCEL_MASK,
	       reg_base + CQSPI_REG_INDIRECTWR);
	return ret;
}

static void cqspi_chipselect(struct cqspi_flash_pdata *f_pdata)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	unsigned int chip_select = f_pdata->cs;
	unsigned int reg;

	reg = readl(reg_base + CQSPI_REG_CONFIG);
	if (cqspi->is_decoded_cs) {
		reg |= CQSPI_REG_CONFIG_DECODE_MASK;
	} else {
		reg &= ~CQSPI_REG_CONFIG_DECODE_MASK;

		/*
		 * Convert CS if without decoder.
		 * CS0 to 4b'1110
		 * CS1 to 4b'1101
		 * CS2 to 4b'1011
		 * CS3 to 4b'0111
		 */
		chip_select = 0xF & ~(1 << chip_select);
	}

	reg &= ~(CQSPI_REG_CONFIG_CHIPSELECT_MASK
		 << CQSPI_REG_CONFIG_CHIPSELECT_LSB);
	reg |= (chip_select & CQSPI_REG_CONFIG_CHIPSELECT_MASK)
	    << CQSPI_REG_CONFIG_CHIPSELECT_LSB;
	writel(reg, reg_base + CQSPI_REG_CONFIG);
}

static unsigned int calculate_ticks_for_ns(const unsigned int ref_clk_hz,
					   const unsigned int ns_val)
{
	unsigned int ticks;

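	/*
	 * Convert a delay in ns to ref clock ticks, rounding up: e.g. at a
	 * 125 MHz ref clock, 50 ns becomes
	 * DIV_ROUND_UP(125000 * 50, 1000000) = 7 ticks.
	 */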
	ticks = ref_clk_hz / 1000;	/* kHz */
	ticks = DIV_ROUND_UP(ticks * ns_val, 1000000);

	return ticks;
}

static void cqspi_delay(struct cqspi_flash_pdata *f_pdata)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *iobase = cqspi->iobase;
	const unsigned int ref_clk_hz = cqspi->master_ref_clk_hz;
	unsigned int tshsl, tchsh, tslch, tsd2d;
	unsigned int reg;
	unsigned int tsclk;

	/* calculate the number of ref ticks for one sclk tick */
	tsclk = DIV_ROUND_UP(ref_clk_hz, cqspi->sclk);

	tshsl = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tshsl_ns);
	/* this particular value must be at least one sclk */
	if (tshsl < tsclk)
		tshsl = tsclk;

	tchsh = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tchsh_ns);
	tslch = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tslch_ns);
	tsd2d = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tsd2d_ns);

	reg = (tshsl & CQSPI_REG_DELAY_TSHSL_MASK)
	       << CQSPI_REG_DELAY_TSHSL_LSB;
	reg |= (tchsh & CQSPI_REG_DELAY_TCHSH_MASK)
		<< CQSPI_REG_DELAY_TCHSH_LSB;
	reg |= (tslch & CQSPI_REG_DELAY_TSLCH_MASK)
		<< CQSPI_REG_DELAY_TSLCH_LSB;
	reg |= (tsd2d & CQSPI_REG_DELAY_TSD2D_MASK)
		<< CQSPI_REG_DELAY_TSD2D_LSB;
	writel(reg, iobase + CQSPI_REG_DELAY);
}

static void cqspi_config_baudrate_div(struct cqspi_st *cqspi)
{
	const unsigned int ref_clk_hz = cqspi->master_ref_clk_hz;
	void __iomem *reg_base = cqspi->iobase;
	u32 reg, div;

	/*
	 * Recalculate the baudrate divisor per the QSPI specification:
	 * sclk = ref_clk / (2 * (div + 1)), so e.g. a 400 MHz ref clock
	 * and a 50 MHz target sclk give div = 3 (divide by 8).
	 */
	div = DIV_ROUND_UP(ref_clk_hz, 2 * cqspi->sclk) - 1;

	reg = readl(reg_base + CQSPI_REG_CONFIG);
	reg &= ~(CQSPI_REG_CONFIG_BAUD_MASK << CQSPI_REG_CONFIG_BAUD_LSB);
	reg |= (div & CQSPI_REG_CONFIG_BAUD_MASK) << CQSPI_REG_CONFIG_BAUD_LSB;
	writel(reg, reg_base + CQSPI_REG_CONFIG);
}

static void cqspi_readdata_capture(struct cqspi_st *cqspi,
				   const bool bypass,
				   const unsigned int delay)
{
	void __iomem *reg_base = cqspi->iobase;
	unsigned int reg;

	reg = readl(reg_base + CQSPI_REG_READCAPTURE);

	if (bypass)
		reg |= (1 << CQSPI_REG_READCAPTURE_BYPASS_LSB);
	else
		reg &= ~(1 << CQSPI_REG_READCAPTURE_BYPASS_LSB);

	reg &= ~(CQSPI_REG_READCAPTURE_DELAY_MASK
		 << CQSPI_REG_READCAPTURE_DELAY_LSB);

	reg |= (delay & CQSPI_REG_READCAPTURE_DELAY_MASK)
		<< CQSPI_REG_READCAPTURE_DELAY_LSB;

	writel(reg, reg_base + CQSPI_REG_READCAPTURE);
}

static void cqspi_controller_enable(struct cqspi_st *cqspi, bool enable)
{
	void __iomem *reg_base = cqspi->iobase;
	unsigned int reg;

	reg = readl(reg_base + CQSPI_REG_CONFIG);

	if (enable)
		reg |= CQSPI_REG_CONFIG_ENABLE_MASK;
	else
		reg &= ~CQSPI_REG_CONFIG_ENABLE_MASK;

	writel(reg, reg_base + CQSPI_REG_CONFIG);
}

static void cqspi_configure(struct cqspi_flash_pdata *f_pdata,
			    unsigned long sclk)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	int switch_cs = (cqspi->current_cs != f_pdata->cs);
	int switch_ck = (cqspi->sclk != sclk);

	if (switch_cs || switch_ck)
		cqspi_controller_enable(cqspi, 0);

	/* Switch chip select. */
	if (switch_cs) {
		cqspi->current_cs = f_pdata->cs;
		cqspi_chipselect(f_pdata);
	}

	/* Setup baudrate divisor and delays */
	if (switch_ck) {
		cqspi->sclk = sclk;
		cqspi_config_baudrate_div(cqspi);
		cqspi_delay(f_pdata);
		cqspi_readdata_capture(cqspi, !cqspi->rclk_en,
				       f_pdata->read_delay);
	}

	if (switch_cs || switch_ck)
		cqspi_controller_enable(cqspi, 1);
}

static ssize_t cqspi_write(struct cqspi_flash_pdata *f_pdata,
			   const struct spi_mem_op *op)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	loff_t to = op->addr.val;
	size_t len = op->data.nbytes;
	const u_char *buf = op->data.buf.out;
	int ret;

	ret = cqspi_set_protocol(f_pdata, op);
	if (ret)
		return ret;

	ret = cqspi_write_setup(f_pdata, op);
	if (ret)
		return ret;

	/*
	 * Some flashes like the Cypress Semper flash expect a dummy 4-byte
	 * address (all 0s) with the read status register command in DTR mode.
	 * But this controller does not support sending dummy address bytes to
	 * the flash when it is polling the write completion register in DTR
	 * mode. So, we cannot use direct mode when in DTR mode for writing
	 * data.
	 */
	if (!f_pdata->dtr && cqspi->use_direct_mode &&
	    ((to + len) <= cqspi->ahb_size)) {
		memcpy_toio(cqspi->ahb_base + to, buf, len);
		return cqspi_wait_idle(cqspi);
	}

	return cqspi_indirect_write_execute(f_pdata, to, buf, len);
}

static void cqspi_rx_dma_callback(void *param)
{
	struct cqspi_st *cqspi = param;

	complete(&cqspi->rx_dma_complete);
}

static int cqspi_direct_read_execute(struct cqspi_flash_pdata *f_pdata,
				     u_char *buf, loff_t from, size_t len)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	struct device *dev = &cqspi->pdev->dev;
	enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
	dma_addr_t dma_src = (dma_addr_t)cqspi->mmap_phys_base + from;
	int ret = 0;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;
	dma_addr_t dma_dst;
	struct device *ddev;

	if (!cqspi->rx_chan || !virt_addr_valid(buf)) {
		memcpy_fromio(buf, cqspi->ahb_base + from, len);
		return 0;
	}

	ddev = cqspi->rx_chan->device->dev;
	dma_dst = dma_map_single(ddev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(ddev, dma_dst)) {
		dev_err(dev, "dma mapping failed\n");
		return -ENOMEM;
	}
	tx = dmaengine_prep_dma_memcpy(cqspi->rx_chan, dma_dst, dma_src,
				       len, flags);
	if (!tx) {
		dev_err(dev, "device_prep_dma_memcpy error\n");
		ret = -EIO;
		goto err_unmap;
	}

	tx->callback = cqspi_rx_dma_callback;
	tx->callback_param = cqspi;
	cookie = tx->tx_submit(tx);
	reinit_completion(&cqspi->rx_dma_complete);

	ret = dma_submit_error(cookie);
	if (ret) {
		dev_err(dev, "dma_submit_error %d\n", cookie);
		ret = -EIO;
		goto err_unmap;
	}

	dma_async_issue_pending(cqspi->rx_chan);
	if (!wait_for_completion_timeout(&cqspi->rx_dma_complete,
					 msecs_to_jiffies(max_t(size_t, len, 500)))) {
		dmaengine_terminate_sync(cqspi->rx_chan);
		dev_err(dev, "DMA wait_for_completion_timeout\n");
		ret = -ETIMEDOUT;
		goto err_unmap;
	}

err_unmap:
	dma_unmap_single(ddev, dma_dst, len, DMA_FROM_DEVICE);

	return ret;
}

static ssize_t cqspi_read(struct cqspi_flash_pdata *f_pdata,
			  const struct spi_mem_op *op)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	struct device *dev = &cqspi->pdev->dev;
	const struct cqspi_driver_platdata *ddata;
	loff_t from = op->addr.val;
	size_t len = op->data.nbytes;
	u_char *buf = op->data.buf.in;
	u64 dma_align = (u64)(uintptr_t)buf;
	int ret;

	ddata = of_device_get_match_data(dev);
	ret = cqspi_set_protocol(f_pdata, op);
	if (ret)
		return ret;

	ret = cqspi_read_setup(f_pdata, op);
	if (ret)
		return ret;

	if (cqspi->use_direct_mode && ((from + len) <= cqspi->ahb_size))
		return cqspi_direct_read_execute(f_pdata, buf, from, len);

	if (cqspi->use_dma_read && ddata && ddata->indirect_read_dma &&
	    virt_addr_valid(buf) && ((dma_align & CQSPI_DMA_UNALIGN) == 0))
		return ddata->indirect_read_dma(f_pdata, buf, from, len);

	return cqspi_indirect_read_execute(f_pdata, buf, from, len);
}

static int cqspi_mem_process(struct spi_mem *mem, const struct spi_mem_op *op)
{
	struct cqspi_st *cqspi = spi_master_get_devdata(mem->spi->master);
	struct cqspi_flash_pdata *f_pdata;

	f_pdata = &cqspi->f_pdata[mem->spi->chip_select];
	cqspi_configure(f_pdata, mem->spi->max_speed_hz);

	if (op->data.dir == SPI_MEM_DATA_IN && op->data.buf.in) {
		if (!op->addr.nbytes)
			return cqspi_command_read(f_pdata, op);

		return cqspi_read(f_pdata, op);
	}

	if (!op->addr.nbytes || !op->data.buf.out)
		return cqspi_command_write(f_pdata, op);

	return cqspi_write(f_pdata, op);
}

static int cqspi_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	int ret;

	ret = cqspi_mem_process(mem, op);
	if (ret)
		dev_err(&mem->spi->dev, "operation failed with %d\n", ret);

	return ret;
}

static bool cqspi_supports_mem_op(struct spi_mem *mem,
				  const struct spi_mem_op *op)
{
	bool all_true, all_false;

	/*
	 * op->dummy.dtr is required for converting nbytes into ncycles.
	 * Also, don't check the dtr field of an op phase having zero nbytes.
	 */
	all_true = op->cmd.dtr &&
		   (!op->addr.nbytes || op->addr.dtr) &&
		   (!op->dummy.nbytes || op->dummy.dtr) &&
		   (!op->data.nbytes || op->data.dtr);

	all_false = !op->cmd.dtr && !op->addr.dtr && !op->dummy.dtr &&
		    !op->data.dtr;

	/*
	 * Mixed DTR modes are not supported: e.g. an op with a DTR cmd
	 * phase but an STR data phase is neither all_true nor all_false
	 * and is rejected.
	 */
	if (!(all_true || all_false))
		return false;

	if (all_true)
		return spi_mem_dtr_supports_op(mem, op);
	else
		return spi_mem_default_supports_op(mem, op);
}

static int cqspi_of_get_flash_pdata(struct platform_device *pdev,
				    struct cqspi_flash_pdata *f_pdata,
				    struct device_node *np)
{
	if (of_property_read_u32(np, "cdns,read-delay", &f_pdata->read_delay)) {
		dev_err(&pdev->dev, "couldn't determine read-delay\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "cdns,tshsl-ns", &f_pdata->tshsl_ns)) {
		dev_err(&pdev->dev, "couldn't determine tshsl-ns\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "cdns,tsd2d-ns", &f_pdata->tsd2d_ns)) {
		dev_err(&pdev->dev, "couldn't determine tsd2d-ns\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "cdns,tchsh-ns", &f_pdata->tchsh_ns)) {
		dev_err(&pdev->dev, "couldn't determine tchsh-ns\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "cdns,tslch-ns", &f_pdata->tslch_ns)) {
		dev_err(&pdev->dev, "couldn't determine tslch-ns\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "spi-max-frequency", &f_pdata->clk_rate)) {
		dev_err(&pdev->dev, "couldn't determine spi-max-frequency\n");
		return -ENXIO;
	}

	return 0;
}

static int cqspi_of_get_pdata(struct cqspi_st *cqspi)
{
	struct device *dev = &cqspi->pdev->dev;
	struct device_node *np = dev->of_node;
	u32 id[2];

	cqspi->is_decoded_cs = of_property_read_bool(np, "cdns,is-decoded-cs");

	if (of_property_read_u32(np, "cdns,fifo-depth", &cqspi->fifo_depth)) {
		dev_err(dev, "couldn't determine fifo-depth\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "cdns,fifo-width", &cqspi->fifo_width)) {
		dev_err(dev, "couldn't determine fifo-width\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "cdns,trigger-address",
				 &cqspi->trigger_address)) {
		dev_err(dev, "couldn't determine trigger-address\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "num-cs", &cqspi->num_chipselect))
		cqspi->num_chipselect = CQSPI_MAX_CHIPSELECT;

	cqspi->rclk_en = of_property_read_bool(np, "cdns,rclk-en");

	if (!of_property_read_u32_array(np, "power-domains", id,
					ARRAY_SIZE(id)))
		cqspi->pd_dev_id = id[1];

	return 0;
}

static void cqspi_controller_init(struct cqspi_st *cqspi)
{
	u32 reg;

	cqspi_controller_enable(cqspi, 0);

	/* Configure the remap address register, no remap */
	writel(0, cqspi->iobase + CQSPI_REG_REMAP);

	/* Disable all interrupts. */
	writel(0, cqspi->iobase + CQSPI_REG_IRQMASK);

	/* Configure the SRAM split to 1:1. */
	writel(cqspi->fifo_depth / 2, cqspi->iobase + CQSPI_REG_SRAMPARTITION);

	/* Load indirect trigger address. */
	writel(cqspi->trigger_address,
	       cqspi->iobase + CQSPI_REG_INDIRECTTRIGGER);

	/* Program read watermark -- 1/2 of the FIFO. */
	writel(cqspi->fifo_depth * cqspi->fifo_width / 2,
	       cqspi->iobase + CQSPI_REG_INDIRECTRDWATERMARK);
	/* Program write watermark -- 1/8 of the FIFO. */
	writel(cqspi->fifo_depth * cqspi->fifo_width / 8,
	       cqspi->iobase + CQSPI_REG_INDIRECTWRWATERMARK);

	/* Disable direct access controller */
	if (!cqspi->use_direct_mode) {
		reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
		reg &= ~CQSPI_REG_CONFIG_ENB_DIR_ACC_CTRL;
		writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);
	}

	/* Enable DMA interface */
	if (cqspi->use_dma_read) {
		reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
		reg |= CQSPI_REG_CONFIG_DMA_MASK;
		writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);
	}

	cqspi_controller_enable(cqspi, 1);
}

static int cqspi_request_mmap_dma(struct cqspi_st *cqspi)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	cqspi->rx_chan = dma_request_chan_by_mask(&mask);
	if (IS_ERR(cqspi->rx_chan)) {
		int ret = PTR_ERR(cqspi->rx_chan);

		cqspi->rx_chan = NULL;
		return dev_err_probe(&cqspi->pdev->dev, ret, "No Rx DMA available\n");
	}
	init_completion(&cqspi->rx_dma_complete);

	return 0;
}

static const char *cqspi_get_name(struct spi_mem *mem)
{
	struct cqspi_st *cqspi = spi_master_get_devdata(mem->spi->master);
	struct device *dev = &cqspi->pdev->dev;

	return devm_kasprintf(dev, GFP_KERNEL, "%s.%d", dev_name(dev),
			      mem->spi->chip_select);
}

static const struct spi_controller_mem_ops cqspi_mem_ops = {
	.exec_op = cqspi_exec_mem_op,
	.get_name = cqspi_get_name,
	.supports_op = cqspi_supports_mem_op,
};

static int cqspi_setup_flash(struct cqspi_st *cqspi)
{
	struct platform_device *pdev = cqspi->pdev;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct cqspi_flash_pdata *f_pdata;
	unsigned int cs;
	int ret;

	/* Get flash device data */
	for_each_available_child_of_node(dev->of_node, np) {
		ret = of_property_read_u32(np, "reg", &cs);
		if (ret) {
			dev_err(dev, "Couldn't determine chip select.\n");
			of_node_put(np);
			return ret;
		}

		if (cs >= CQSPI_MAX_CHIPSELECT) {
			dev_err(dev, "Chip select %d out of range.\n", cs);
			of_node_put(np);
			return -EINVAL;
		}

		f_pdata = &cqspi->f_pdata[cs];
		f_pdata->cqspi = cqspi;
		f_pdata->cs = cs;

		ret = cqspi_of_get_flash_pdata(pdev, f_pdata, np);
		if (ret) {
			of_node_put(np);
			return ret;
		}
	}

	return 0;
}

static int cqspi_probe(struct platform_device *pdev)
{
	const struct cqspi_driver_platdata *ddata;
	struct reset_control *rstc, *rstc_ocp;
	struct device *dev = &pdev->dev;
	struct spi_master *master;
	struct resource *res_ahb;
	struct cqspi_st *cqspi;
	struct resource *res;
	int ret;
	int irq;

	master = spi_alloc_master(&pdev->dev, sizeof(*cqspi));
	if (!master) {
		dev_err(&pdev->dev, "spi_alloc_master failed\n");
		return -ENOMEM;
	}
	master->mode_bits = SPI_RX_QUAD | SPI_RX_DUAL;
	master->mem_ops = &cqspi_mem_ops;
	master->dev.of_node = pdev->dev.of_node;

	cqspi = spi_master_get_devdata(master);

	cqspi->pdev = pdev;
	platform_set_drvdata(pdev, cqspi);

	/* Obtain configuration from OF. */
	ret = cqspi_of_get_pdata(cqspi);
	if (ret) {
		dev_err(dev, "Cannot get mandatory OF data.\n");
		ret = -ENODEV;
		goto probe_master_put;
	}

	/* Obtain QSPI clock. */
	cqspi->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(cqspi->clk)) {
		dev_err(dev, "Cannot claim QSPI clock.\n");
		ret = PTR_ERR(cqspi->clk);
		goto probe_master_put;
	}

	/* Obtain and remap controller address. */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	cqspi->iobase = devm_ioremap_resource(dev, res);
	if (IS_ERR(cqspi->iobase)) {
		dev_err(dev, "Cannot remap controller address.\n");
		ret = PTR_ERR(cqspi->iobase);
		goto probe_master_put;
	}

	/* Obtain and remap AHB address. */
	res_ahb = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	cqspi->ahb_base = devm_ioremap_resource(dev, res_ahb);
	if (IS_ERR(cqspi->ahb_base)) {
		dev_err(dev, "Cannot remap AHB address.\n");
		ret = PTR_ERR(cqspi->ahb_base);
		goto probe_master_put;
	}
	cqspi->mmap_phys_base = (dma_addr_t)res_ahb->start;
	cqspi->ahb_size = resource_size(res_ahb);

	init_completion(&cqspi->transfer_complete);

	/* Obtain IRQ line. */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = -ENXIO;
		goto probe_master_put;
	}

	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		goto probe_master_put;
	}

	ret = clk_prepare_enable(cqspi->clk);
	if (ret) {
		dev_err(dev, "Cannot enable QSPI clock.\n");
		goto probe_clk_failed;
	}

	/* Obtain QSPI reset control */
	rstc = devm_reset_control_get_optional_exclusive(dev, "qspi");
	if (IS_ERR(rstc)) {
		ret = PTR_ERR(rstc);
		dev_err(dev, "Cannot get QSPI reset.\n");
		goto probe_reset_failed;
	}

	rstc_ocp = devm_reset_control_get_optional_exclusive(dev, "qspi-ocp");
	if (IS_ERR(rstc_ocp)) {
		ret = PTR_ERR(rstc_ocp);
		dev_err(dev, "Cannot get QSPI OCP reset.\n");
		goto probe_reset_failed;
	}

	reset_control_assert(rstc);
	reset_control_deassert(rstc);

	reset_control_assert(rstc_ocp);
	reset_control_deassert(rstc_ocp);

	cqspi->master_ref_clk_hz = clk_get_rate(cqspi->clk);
	master->max_speed_hz = cqspi->master_ref_clk_hz;

	/* write completion is supported by default */
	cqspi->wr_completion = true;

	ddata = of_device_get_match_data(dev);
	if (ddata) {
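		/*
		 * CQSPI_NEEDS_WR_DELAY: wait roughly 50 ref clock periods
		 * (expressed in ns) after kicking off an indirect write,
		 * comfortably covering the few QSPI_REF_CLK cycles of
		 * internal synchronization the TRM calls for.
		 */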
		if (ddata->quirks & CQSPI_NEEDS_WR_DELAY)
			cqspi->wr_delay = 50 * DIV_ROUND_UP(NSEC_PER_SEC,
						cqspi->master_ref_clk_hz);
		if (ddata->hwcaps_mask & CQSPI_SUPPORTS_OCTAL)
			master->mode_bits |= SPI_RX_OCTAL | SPI_TX_OCTAL;
		if (!(ddata->quirks & CQSPI_DISABLE_DAC_MODE))
			cqspi->use_direct_mode = true;
		if (ddata->quirks & CQSPI_SUPPORT_EXTERNAL_DMA)
			cqspi->use_dma_read = true;
		if (ddata->quirks & CQSPI_NO_SUPPORT_WR_COMPLETION)
			cqspi->wr_completion = false;

		if (of_device_is_compatible(pdev->dev.of_node,
					    "xlnx,versal-ospi-1.0"))
			dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	}

	ret = devm_request_irq(dev, irq, cqspi_irq_handler, 0,
			       pdev->name, cqspi);
	if (ret) {
		dev_err(dev, "Cannot request IRQ.\n");
		goto probe_reset_failed;
	}

	cqspi_wait_idle(cqspi);
	cqspi_controller_init(cqspi);
	cqspi->current_cs = -1;
	cqspi->sclk = 0;

	master->num_chipselect = cqspi->num_chipselect;

	ret = cqspi_setup_flash(cqspi);
	if (ret) {
		dev_err(dev, "failed to setup flash parameters %d\n", ret);
		goto probe_setup_failed;
	}

	if (cqspi->use_direct_mode) {
		ret = cqspi_request_mmap_dma(cqspi);
		if (ret == -EPROBE_DEFER)
			goto probe_setup_failed;
	}

	ret = devm_spi_register_master(dev, master);
	if (ret) {
		dev_err(&pdev->dev, "failed to register SPI ctlr %d\n", ret);
		goto probe_setup_failed;
	}

	return 0;
probe_setup_failed:
	cqspi_controller_enable(cqspi, 0);
probe_reset_failed:
	clk_disable_unprepare(cqspi->clk);
probe_clk_failed:
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);
probe_master_put:
	spi_master_put(master);
	return ret;
}

static int cqspi_remove(struct platform_device *pdev)
{
	struct cqspi_st *cqspi = platform_get_drvdata(pdev);

	cqspi_controller_enable(cqspi, 0);

	if (cqspi->rx_chan)
		dma_release_channel(cqspi->rx_chan);

	clk_disable_unprepare(cqspi->clk);

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int cqspi_suspend(struct device *dev)
{
	struct cqspi_st *cqspi = dev_get_drvdata(dev);

	cqspi_controller_enable(cqspi, 0);
	return 0;
}

static int cqspi_resume(struct device *dev)
{
	struct cqspi_st *cqspi = dev_get_drvdata(dev);

	cqspi_controller_enable(cqspi, 1);
	return 0;
}

static const struct dev_pm_ops cqspi_dev_pm_ops = {
	.suspend = cqspi_suspend,
	.resume = cqspi_resume,
};

#define CQSPI_DEV_PM_OPS	(&cqspi_dev_pm_ops)
#else
#define CQSPI_DEV_PM_OPS	NULL
#endif

static const struct cqspi_driver_platdata cdns_qspi = {
	.quirks = CQSPI_DISABLE_DAC_MODE,
};

static const struct cqspi_driver_platdata k2g_qspi = {
	.quirks = CQSPI_NEEDS_WR_DELAY,
};

static const struct cqspi_driver_platdata am654_ospi = {
	.hwcaps_mask = CQSPI_SUPPORTS_OCTAL,
	.quirks = CQSPI_NEEDS_WR_DELAY,
};

static const struct cqspi_driver_platdata intel_lgm_qspi = {
	.quirks = CQSPI_DISABLE_DAC_MODE,
};

static const struct cqspi_driver_platdata socfpga_qspi = {
	.quirks = CQSPI_NO_SUPPORT_WR_COMPLETION,
};

static const struct cqspi_driver_platdata versal_ospi = {
	.hwcaps_mask = CQSPI_SUPPORTS_OCTAL,
	.quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_SUPPORT_EXTERNAL_DMA,
	.indirect_read_dma = cqspi_versal_indirect_read_dma,
	.get_dma_status = cqspi_get_versal_dma_status,
};

static const struct of_device_id cqspi_dt_ids[] = {
	{
		.compatible = "cdns,qspi-nor",
		.data = &cdns_qspi,
	},
	{
		.compatible = "ti,k2g-qspi",
		.data = &k2g_qspi,
	},
	{
		.compatible = "ti,am654-ospi",
		.data = &am654_ospi,
	},
	{
		.compatible = "intel,lgm-qspi",
		.data = &intel_lgm_qspi,
	},
	{
		.compatible = "xlnx,versal-ospi-1.0",
		.data = &versal_ospi,
	},
	{
		.compatible = "intel,socfpga-qspi",
		.data = &socfpga_qspi,
	},
	{ /* end of table */ }
};

MODULE_DEVICE_TABLE(of, cqspi_dt_ids);

static struct platform_driver cqspi_platform_driver = {
	.probe = cqspi_probe,
	.remove = cqspi_remove,
	.driver = {
		.name = CQSPI_NAME,
		.pm = CQSPI_DEV_PM_OPS,
		.of_match_table = cqspi_dt_ids,
	},
};

module_platform_driver(cqspi_platform_driver);

MODULE_DESCRIPTION("Cadence QSPI Controller Driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" CQSPI_NAME);
MODULE_AUTHOR("Ley Foon Tan <lftan@altera.com>");
MODULE_AUTHOR("Graham Moore <grmoore@opensource.altera.com>");
MODULE_AUTHOR("Vadivel Murugan R <vadivel.muruganx.ramuthevar@intel.com>");
MODULE_AUTHOR("Vignesh Raghavendra <vigneshr@ti.com>");
MODULE_AUTHOR("Pratyush Yadav <p.yadav@ti.com>");
1933