1 // SPDX-License-Identifier: GPL-2.0-only 2 // 3 // Driver for Cadence QSPI Controller 4 // 5 // Copyright Altera Corporation (C) 2012-2014. All rights reserved. 6 // Copyright Intel Corporation (C) 2019-2020. All rights reserved. 7 // Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com 8 9 #include <linux/clk.h> 10 #include <linux/completion.h> 11 #include <linux/delay.h> 12 #include <linux/dma-mapping.h> 13 #include <linux/dmaengine.h> 14 #include <linux/err.h> 15 #include <linux/errno.h> 16 #include <linux/firmware/xlnx-zynqmp.h> 17 #include <linux/interrupt.h> 18 #include <linux/io.h> 19 #include <linux/iopoll.h> 20 #include <linux/jiffies.h> 21 #include <linux/kernel.h> 22 #include <linux/log2.h> 23 #include <linux/module.h> 24 #include <linux/of.h> 25 #include <linux/platform_device.h> 26 #include <linux/pm_runtime.h> 27 #include <linux/reset.h> 28 #include <linux/sched.h> 29 #include <linux/spi/spi.h> 30 #include <linux/spi/spi-mem.h> 31 #include <linux/timer.h> 32 33 #define CQSPI_NAME "cadence-qspi" 34 #define CQSPI_MAX_CHIPSELECT 4 35 36 static_assert(CQSPI_MAX_CHIPSELECT <= SPI_DEVICE_CS_CNT_MAX); 37 38 /* Quirks */ 39 #define CQSPI_NEEDS_WR_DELAY BIT(0) 40 #define CQSPI_DISABLE_DAC_MODE BIT(1) 41 #define CQSPI_SUPPORT_EXTERNAL_DMA BIT(2) 42 #define CQSPI_NO_SUPPORT_WR_COMPLETION BIT(3) 43 #define CQSPI_SLOW_SRAM BIT(4) 44 #define CQSPI_NEEDS_APB_AHB_HAZARD_WAR BIT(5) 45 #define CQSPI_RD_NO_IRQ BIT(6) 46 #define CQSPI_DMA_SET_MASK BIT(7) 47 #define CQSPI_SUPPORT_DEVICE_RESET BIT(8) 48 #define CQSPI_DISABLE_STIG_MODE BIT(9) 49 #define CQSPI_DISABLE_RUNTIME_PM BIT(10) 50 #define CQSPI_NO_INDIRECT_MODE BIT(11) 51 #define CQSPI_HAS_WR_PROTECT BIT(12) 52 53 /* Capabilities */ 54 #define CQSPI_SUPPORTS_OCTAL BIT(0) 55 #define CQSPI_SUPPORTS_QUAD BIT(1) 56 57 #define CQSPI_OP_WIDTH(part) ((part).nbytes ? ilog2((part).buswidth) : 0) 58 59 enum { 60 CLK_QSPI_REF = 0, 61 CLK_QSPI_APB, 62 CLK_QSPI_AHB, 63 CLK_QSPI_NUM, 64 }; 65 66 struct cqspi_st; 67 68 struct cqspi_flash_pdata { 69 struct cqspi_st *cqspi; 70 u32 clk_rate; 71 u32 read_delay; 72 u32 tshsl_ns; 73 u32 tsd2d_ns; 74 u32 tchsh_ns; 75 u32 tslch_ns; 76 u8 cs; 77 }; 78 79 static const struct clk_bulk_data cqspi_clks[CLK_QSPI_NUM] = { 80 [CLK_QSPI_APB] = { .id = "apb" }, 81 [CLK_QSPI_AHB] = { .id = "ahb" }, 82 }; 83 84 struct cqspi_st { 85 struct platform_device *pdev; 86 struct spi_controller *host; 87 struct clk_bulk_data clks[CLK_QSPI_NUM]; 88 unsigned int sclk; 89 90 void __iomem *iobase; 91 void __iomem *ahb_base; 92 resource_size_t ahb_size; 93 struct completion transfer_complete; 94 95 struct dma_chan *rx_chan; 96 struct completion rx_dma_complete; 97 dma_addr_t mmap_phys_base; 98 99 int current_cs; 100 unsigned long master_ref_clk_hz; 101 bool is_decoded_cs; 102 u32 fifo_depth; 103 u32 fifo_width; 104 u32 num_chipselect; 105 bool rclk_en; 106 u32 trigger_address; 107 u32 wr_delay; 108 bool use_direct_mode; 109 bool use_direct_mode_wr; 110 struct cqspi_flash_pdata f_pdata[CQSPI_MAX_CHIPSELECT]; 111 bool use_dma_read; 112 u32 pd_dev_id; 113 bool wr_completion; 114 bool slow_sram; 115 bool apb_ahb_hazard; 116 117 bool is_jh7110; /* Flag for StarFive JH7110 SoC */ 118 bool is_rzn1; /* Flag for Renesas RZ/N1 SoC */ 119 bool disable_stig_mode; 120 refcount_t refcount; 121 refcount_t inflight_ops; 122 123 const struct cqspi_driver_platdata *ddata; 124 }; 125 126 struct cqspi_driver_platdata { 127 u32 hwcaps_mask; 128 u16 quirks; 129 int (*indirect_read_dma)(struct cqspi_flash_pdata *f_pdata, 130 u_char *rxbuf, loff_t from_addr, size_t n_rx); 131 u32 (*get_dma_status)(struct cqspi_st *cqspi); 132 }; 133 134 /* Operation timeout value */ 135 #define CQSPI_TIMEOUT_MS 500 136 #define CQSPI_READ_TIMEOUT_MS 10 137 #define CQSPI_BUSYWAIT_TIMEOUT_US 500 138 139 /* Runtime_pm autosuspend delay */ 140 #define CQSPI_AUTOSUSPEND_TIMEOUT 2000 141 142 #define CQSPI_DUMMY_CLKS_PER_BYTE 8 143 #define CQSPI_DUMMY_BYTES_MAX 4 144 #define CQSPI_DUMMY_CLKS_MAX 31 145 146 #define CQSPI_STIG_DATA_LEN_MAX 8 147 148 /* Register map */ 149 #define CQSPI_REG_CONFIG 0x00 150 #define CQSPI_REG_CONFIG_ENABLE_MASK BIT(0) 151 #define CQSPI_REG_CONFIG_ENB_DIR_ACC_CTRL BIT(7) 152 #define CQSPI_REG_CONFIG_DECODE_MASK BIT(9) 153 #define CQSPI_REG_CONFIG_CHIPSELECT_LSB 10 154 #define CQSPI_REG_CONFIG_DMA_MASK BIT(15) 155 #define CQSPI_REG_CONFIG_BAUD_LSB 19 156 #define CQSPI_REG_CONFIG_DTR_PROTO BIT(24) 157 #define CQSPI_REG_CONFIG_DUAL_OPCODE BIT(30) 158 #define CQSPI_REG_CONFIG_IDLE_LSB 31 159 #define CQSPI_REG_CONFIG_CHIPSELECT_MASK 0xF 160 #define CQSPI_REG_CONFIG_BAUD_MASK 0xF 161 #define CQSPI_REG_CONFIG_RESET_PIN_FLD_MASK BIT(5) 162 #define CQSPI_REG_CONFIG_RESET_CFG_FLD_MASK BIT(6) 163 164 #define CQSPI_REG_RD_INSTR 0x04 165 #define CQSPI_REG_RD_INSTR_OPCODE_LSB 0 166 #define CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB 8 167 #define CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB 12 168 #define CQSPI_REG_RD_INSTR_TYPE_DATA_LSB 16 169 #define CQSPI_REG_RD_INSTR_MODE_EN_LSB 20 170 #define CQSPI_REG_RD_INSTR_DUMMY_LSB 24 171 #define CQSPI_REG_RD_INSTR_TYPE_INSTR_MASK 0x3 172 #define CQSPI_REG_RD_INSTR_TYPE_ADDR_MASK 0x3 173 #define CQSPI_REG_RD_INSTR_TYPE_DATA_MASK 0x3 174 #define CQSPI_REG_RD_INSTR_DUMMY_MASK 0x1F 175 176 #define CQSPI_REG_WR_INSTR 0x08 177 #define CQSPI_REG_WR_INSTR_OPCODE_LSB 0 178 #define CQSPI_REG_WR_INSTR_TYPE_ADDR_LSB 12 179 #define CQSPI_REG_WR_INSTR_TYPE_DATA_LSB 16 180 181 #define CQSPI_REG_DELAY 0x0C 182 #define CQSPI_REG_DELAY_TSLCH_LSB 0 183 #define CQSPI_REG_DELAY_TCHSH_LSB 8 184 #define CQSPI_REG_DELAY_TSD2D_LSB 16 185 #define CQSPI_REG_DELAY_TSHSL_LSB 24 186 #define CQSPI_REG_DELAY_TSLCH_MASK 0xFF 187 #define CQSPI_REG_DELAY_TCHSH_MASK 0xFF 188 #define CQSPI_REG_DELAY_TSD2D_MASK 0xFF 189 #define CQSPI_REG_DELAY_TSHSL_MASK 0xFF 190 191 #define CQSPI_REG_READCAPTURE 0x10 192 #define CQSPI_REG_READCAPTURE_BYPASS_LSB 0 193 #define CQSPI_REG_READCAPTURE_DELAY_LSB 1 194 #define CQSPI_REG_READCAPTURE_DELAY_MASK 0xF 195 196 #define CQSPI_REG_SIZE 0x14 197 #define CQSPI_REG_SIZE_ADDRESS_LSB 0 198 #define CQSPI_REG_SIZE_PAGE_LSB 4 199 #define CQSPI_REG_SIZE_BLOCK_LSB 16 200 #define CQSPI_REG_SIZE_ADDRESS_MASK 0xF 201 #define CQSPI_REG_SIZE_PAGE_MASK 0xFFF 202 #define CQSPI_REG_SIZE_BLOCK_MASK 0x3F 203 204 #define CQSPI_REG_SRAMPARTITION 0x18 205 #define CQSPI_REG_INDIRECTTRIGGER 0x1C 206 207 #define CQSPI_REG_DMA 0x20 208 #define CQSPI_REG_DMA_SINGLE_LSB 0 209 #define CQSPI_REG_DMA_BURST_LSB 8 210 #define CQSPI_REG_DMA_SINGLE_MASK 0xFF 211 #define CQSPI_REG_DMA_BURST_MASK 0xFF 212 213 #define CQSPI_REG_REMAP 0x24 214 #define CQSPI_REG_MODE_BIT 0x28 215 216 #define CQSPI_REG_SDRAMLEVEL 0x2C 217 #define CQSPI_REG_SDRAMLEVEL_RD_LSB 0 218 #define CQSPI_REG_SDRAMLEVEL_WR_LSB 16 219 #define CQSPI_REG_SDRAMLEVEL_RD_MASK 0xFFFF 220 #define CQSPI_REG_SDRAMLEVEL_WR_MASK 0xFFFF 221 222 #define CQSPI_REG_WR_COMPLETION_CTRL 0x38 223 #define CQSPI_REG_WR_DISABLE_AUTO_POLL BIT(14) 224 225 #define CQSPI_REG_IRQSTATUS 0x40 226 #define CQSPI_REG_IRQMASK 0x44 227 228 #define CQSPI_REG_WR_PROT_CTRL 0x58 229 230 #define CQSPI_REG_INDIRECTRD 0x60 231 #define CQSPI_REG_INDIRECTRD_START_MASK BIT(0) 232 #define CQSPI_REG_INDIRECTRD_CANCEL_MASK BIT(1) 233 #define CQSPI_REG_INDIRECTRD_DONE_MASK BIT(5) 234 235 #define CQSPI_REG_INDIRECTRDWATERMARK 0x64 236 #define CQSPI_REG_INDIRECTRDSTARTADDR 0x68 237 #define CQSPI_REG_INDIRECTRDBYTES 0x6C 238 239 #define CQSPI_REG_CMDCTRL 0x90 240 #define CQSPI_REG_CMDCTRL_EXECUTE_MASK BIT(0) 241 #define CQSPI_REG_CMDCTRL_INPROGRESS_MASK BIT(1) 242 #define CQSPI_REG_CMDCTRL_DUMMY_LSB 7 243 #define CQSPI_REG_CMDCTRL_WR_BYTES_LSB 12 244 #define CQSPI_REG_CMDCTRL_WR_EN_LSB 15 245 #define CQSPI_REG_CMDCTRL_ADD_BYTES_LSB 16 246 #define CQSPI_REG_CMDCTRL_ADDR_EN_LSB 19 247 #define CQSPI_REG_CMDCTRL_RD_BYTES_LSB 20 248 #define CQSPI_REG_CMDCTRL_RD_EN_LSB 23 249 #define CQSPI_REG_CMDCTRL_OPCODE_LSB 24 250 #define CQSPI_REG_CMDCTRL_WR_BYTES_MASK 0x7 251 #define CQSPI_REG_CMDCTRL_ADD_BYTES_MASK 0x3 252 #define CQSPI_REG_CMDCTRL_RD_BYTES_MASK 0x7 253 #define CQSPI_REG_CMDCTRL_DUMMY_MASK 0x1F 254 255 #define CQSPI_REG_INDIRECTWR 0x70 256 #define CQSPI_REG_INDIRECTWR_START_MASK BIT(0) 257 #define CQSPI_REG_INDIRECTWR_CANCEL_MASK BIT(1) 258 #define CQSPI_REG_INDIRECTWR_DONE_MASK BIT(5) 259 260 #define CQSPI_REG_INDIRECTWRWATERMARK 0x74 261 #define CQSPI_REG_INDIRECTWRSTARTADDR 0x78 262 #define CQSPI_REG_INDIRECTWRBYTES 0x7C 263 264 #define CQSPI_REG_INDTRIG_ADDRRANGE 0x80 265 266 #define CQSPI_REG_CMDADDRESS 0x94 267 #define CQSPI_REG_CMDREADDATALOWER 0xA0 268 #define CQSPI_REG_CMDREADDATAUPPER 0xA4 269 #define CQSPI_REG_CMDWRITEDATALOWER 0xA8 270 #define CQSPI_REG_CMDWRITEDATAUPPER 0xAC 271 272 #define CQSPI_REG_POLLING_STATUS 0xB0 273 #define CQSPI_REG_POLLING_STATUS_DUMMY_LSB 16 274 275 #define CQSPI_REG_OP_EXT_LOWER 0xE0 276 #define CQSPI_REG_OP_EXT_READ_LSB 24 277 #define CQSPI_REG_OP_EXT_WRITE_LSB 16 278 #define CQSPI_REG_OP_EXT_STIG_LSB 0 279 280 #define CQSPI_REG_VERSAL_DMA_SRC_ADDR 0x1000 281 282 #define CQSPI_REG_VERSAL_DMA_DST_ADDR 0x1800 283 #define CQSPI_REG_VERSAL_DMA_DST_SIZE 0x1804 284 285 #define CQSPI_REG_VERSAL_DMA_DST_CTRL 0x180C 286 287 #define CQSPI_REG_VERSAL_DMA_DST_I_STS 0x1814 288 #define CQSPI_REG_VERSAL_DMA_DST_I_EN 0x1818 289 #define CQSPI_REG_VERSAL_DMA_DST_I_DIS 0x181C 290 #define CQSPI_REG_VERSAL_DMA_DST_DONE_MASK BIT(1) 291 292 #define CQSPI_REG_VERSAL_DMA_DST_ADDR_MSB 0x1828 293 294 #define CQSPI_REG_VERSAL_DMA_DST_CTRL_VAL 0xF43FFA00 295 #define CQSPI_REG_VERSAL_ADDRRANGE_WIDTH_VAL 0x6 296 297 /* Interrupt status bits */ 298 #define CQSPI_REG_IRQ_MODE_ERR BIT(0) 299 #define CQSPI_REG_IRQ_UNDERFLOW BIT(1) 300 #define CQSPI_REG_IRQ_IND_COMP BIT(2) 301 #define CQSPI_REG_IRQ_IND_RD_REJECT BIT(3) 302 #define CQSPI_REG_IRQ_WR_PROTECTED_ERR BIT(4) 303 #define CQSPI_REG_IRQ_ILLEGAL_AHB_ERR BIT(5) 304 #define CQSPI_REG_IRQ_WATERMARK BIT(6) 305 #define CQSPI_REG_IRQ_IND_SRAM_FULL BIT(12) 306 307 #define CQSPI_IRQ_MASK_RD (CQSPI_REG_IRQ_WATERMARK | \ 308 CQSPI_REG_IRQ_IND_SRAM_FULL | \ 309 CQSPI_REG_IRQ_IND_COMP) 310 311 #define CQSPI_IRQ_MASK_RD_SLOW_SRAM (CQSPI_REG_IRQ_WATERMARK | \ 312 CQSPI_REG_IRQ_IND_COMP) 313 314 #define CQSPI_IRQ_MASK_WR (CQSPI_REG_IRQ_IND_COMP | \ 315 CQSPI_REG_IRQ_WATERMARK | \ 316 CQSPI_REG_IRQ_UNDERFLOW) 317 318 #define CQSPI_IRQ_STATUS_MASK 0x1FFFF 319 #define CQSPI_DMA_UNALIGN 0x3 320 321 #define CQSPI_REG_VERSAL_DMA_VAL 0x602 322 323 static int cqspi_wait_for_bit(const struct cqspi_driver_platdata *ddata, 324 void __iomem *reg, const u32 mask, bool clr, 325 bool busywait) 326 { 327 u64 timeout_us = CQSPI_TIMEOUT_MS * USEC_PER_MSEC; 328 u32 val; 329 330 if (busywait) { 331 int ret = readl_relaxed_poll_timeout(reg, val, 332 (((clr ? ~val : val) & mask) == mask), 333 0, CQSPI_BUSYWAIT_TIMEOUT_US); 334 335 if (ret != -ETIMEDOUT) 336 return ret; 337 338 timeout_us -= CQSPI_BUSYWAIT_TIMEOUT_US; 339 } 340 341 return readl_relaxed_poll_timeout(reg, val, 342 (((clr ? ~val : val) & mask) == mask), 343 10, timeout_us); 344 } 345 346 static bool cqspi_is_idle(struct cqspi_st *cqspi) 347 { 348 u32 reg = readl(cqspi->iobase + CQSPI_REG_CONFIG); 349 350 return reg & BIT(CQSPI_REG_CONFIG_IDLE_LSB); 351 } 352 353 static u32 cqspi_get_rd_sram_level(struct cqspi_st *cqspi) 354 { 355 u32 reg = readl(cqspi->iobase + CQSPI_REG_SDRAMLEVEL); 356 357 reg >>= CQSPI_REG_SDRAMLEVEL_RD_LSB; 358 return reg & CQSPI_REG_SDRAMLEVEL_RD_MASK; 359 } 360 361 static u32 cqspi_get_versal_dma_status(struct cqspi_st *cqspi) 362 { 363 u32 dma_status; 364 365 dma_status = readl(cqspi->iobase + 366 CQSPI_REG_VERSAL_DMA_DST_I_STS); 367 writel(dma_status, cqspi->iobase + 368 CQSPI_REG_VERSAL_DMA_DST_I_STS); 369 370 return dma_status & CQSPI_REG_VERSAL_DMA_DST_DONE_MASK; 371 } 372 373 static irqreturn_t cqspi_irq_handler(int this_irq, void *dev) 374 { 375 struct cqspi_st *cqspi = dev; 376 const struct cqspi_driver_platdata *ddata = cqspi->ddata; 377 unsigned int irq_status; 378 379 /* Read interrupt status */ 380 irq_status = readl(cqspi->iobase + CQSPI_REG_IRQSTATUS); 381 382 /* Clear interrupt */ 383 writel(irq_status, cqspi->iobase + CQSPI_REG_IRQSTATUS); 384 385 if (cqspi->use_dma_read && ddata && ddata->get_dma_status) 386 irq_status = ddata->get_dma_status(cqspi); 387 else if (cqspi->slow_sram) 388 irq_status &= CQSPI_IRQ_MASK_RD_SLOW_SRAM | CQSPI_IRQ_MASK_WR; 389 else 390 irq_status &= CQSPI_IRQ_MASK_RD | CQSPI_IRQ_MASK_WR; 391 392 if (irq_status) 393 complete(&cqspi->transfer_complete); 394 395 return IRQ_HANDLED; 396 } 397 398 static unsigned int cqspi_calc_rdreg(const struct spi_mem_op *op) 399 { 400 u32 rdreg = 0; 401 402 rdreg |= CQSPI_OP_WIDTH(op->cmd) << CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB; 403 rdreg |= CQSPI_OP_WIDTH(op->addr) << CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB; 404 rdreg |= CQSPI_OP_WIDTH(op->data) << CQSPI_REG_RD_INSTR_TYPE_DATA_LSB; 405 406 return rdreg; 407 } 408 409 static unsigned int cqspi_calc_dummy(const struct spi_mem_op *op) 410 { 411 unsigned int dummy_clk; 412 413 if (!op->dummy.nbytes) 414 return 0; 415 416 dummy_clk = op->dummy.nbytes * (8 / op->dummy.buswidth); 417 if (op->cmd.dtr) 418 dummy_clk /= 2; 419 420 return dummy_clk; 421 } 422 423 static int cqspi_wait_idle(struct cqspi_st *cqspi) 424 { 425 const unsigned int poll_idle_retry = 3; 426 unsigned int count = 0; 427 unsigned long timeout; 428 429 timeout = jiffies + msecs_to_jiffies(CQSPI_TIMEOUT_MS); 430 while (1) { 431 /* 432 * Read few times in succession to ensure the controller 433 * is indeed idle, that is, the bit does not transition 434 * low again. 435 */ 436 if (cqspi_is_idle(cqspi)) 437 count++; 438 else 439 count = 0; 440 441 if (count >= poll_idle_retry) 442 return 0; 443 444 if (time_after(jiffies, timeout)) { 445 /* Timeout, in busy mode. */ 446 dev_err(&cqspi->pdev->dev, 447 "QSPI is still busy after %dms timeout.\n", 448 CQSPI_TIMEOUT_MS); 449 return -ETIMEDOUT; 450 } 451 452 cpu_relax(); 453 } 454 } 455 456 static int cqspi_exec_flash_cmd(struct cqspi_st *cqspi, unsigned int reg) 457 { 458 void __iomem *reg_base = cqspi->iobase; 459 int ret; 460 461 /* Write the CMDCTRL without start execution. */ 462 writel(reg, reg_base + CQSPI_REG_CMDCTRL); 463 /* Start execute */ 464 reg |= CQSPI_REG_CMDCTRL_EXECUTE_MASK; 465 writel(reg, reg_base + CQSPI_REG_CMDCTRL); 466 467 /* Polling for completion. */ 468 ret = cqspi_wait_for_bit(cqspi->ddata, reg_base + CQSPI_REG_CMDCTRL, 469 CQSPI_REG_CMDCTRL_INPROGRESS_MASK, 1, true); 470 if (ret) { 471 dev_err(&cqspi->pdev->dev, 472 "Flash command execution timed out.\n"); 473 return ret; 474 } 475 476 /* Polling QSPI idle status. */ 477 return cqspi_wait_idle(cqspi); 478 } 479 480 static int cqspi_setup_opcode_ext(struct cqspi_flash_pdata *f_pdata, 481 const struct spi_mem_op *op, 482 unsigned int shift) 483 { 484 struct cqspi_st *cqspi = f_pdata->cqspi; 485 void __iomem *reg_base = cqspi->iobase; 486 unsigned int reg; 487 u8 ext; 488 489 if (op->cmd.nbytes != 2) 490 return -EINVAL; 491 492 /* Opcode extension is the LSB. */ 493 ext = op->cmd.opcode & 0xff; 494 495 reg = readl(reg_base + CQSPI_REG_OP_EXT_LOWER); 496 reg &= ~(0xff << shift); 497 reg |= ext << shift; 498 writel(reg, reg_base + CQSPI_REG_OP_EXT_LOWER); 499 500 return 0; 501 } 502 503 static int cqspi_enable_dtr(struct cqspi_flash_pdata *f_pdata, 504 const struct spi_mem_op *op, unsigned int shift) 505 { 506 struct cqspi_st *cqspi = f_pdata->cqspi; 507 void __iomem *reg_base = cqspi->iobase; 508 unsigned int reg; 509 int ret; 510 511 reg = readl(reg_base + CQSPI_REG_CONFIG); 512 513 /* 514 * We enable dual byte opcode here. The callers have to set up the 515 * extension opcode based on which type of operation it is. 516 */ 517 if (op->cmd.dtr) { 518 reg |= CQSPI_REG_CONFIG_DTR_PROTO; 519 reg |= CQSPI_REG_CONFIG_DUAL_OPCODE; 520 521 /* Set up command opcode extension. */ 522 ret = cqspi_setup_opcode_ext(f_pdata, op, shift); 523 if (ret) 524 return ret; 525 } else { 526 unsigned int mask = CQSPI_REG_CONFIG_DTR_PROTO | CQSPI_REG_CONFIG_DUAL_OPCODE; 527 /* Shortcut if DTR is already disabled. */ 528 if ((reg & mask) == 0) 529 return 0; 530 reg &= ~mask; 531 } 532 533 writel(reg, reg_base + CQSPI_REG_CONFIG); 534 535 return cqspi_wait_idle(cqspi); 536 } 537 538 static int cqspi_command_read(struct cqspi_flash_pdata *f_pdata, 539 const struct spi_mem_op *op) 540 { 541 struct cqspi_st *cqspi = f_pdata->cqspi; 542 void __iomem *reg_base = cqspi->iobase; 543 u8 *rxbuf = op->data.buf.in; 544 u8 opcode; 545 size_t n_rx = op->data.nbytes; 546 unsigned int rdreg; 547 unsigned int reg; 548 unsigned int dummy_clk; 549 size_t read_len; 550 int status; 551 552 status = cqspi_enable_dtr(f_pdata, op, CQSPI_REG_OP_EXT_STIG_LSB); 553 if (status) 554 return status; 555 556 if (!n_rx || n_rx > CQSPI_STIG_DATA_LEN_MAX || !rxbuf) { 557 dev_err(&cqspi->pdev->dev, 558 "Invalid input argument, len %zu rxbuf 0x%p\n", 559 n_rx, rxbuf); 560 return -EINVAL; 561 } 562 563 if (op->cmd.dtr) 564 opcode = op->cmd.opcode >> 8; 565 else 566 opcode = op->cmd.opcode; 567 568 reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB; 569 570 rdreg = cqspi_calc_rdreg(op); 571 writel(rdreg, reg_base + CQSPI_REG_RD_INSTR); 572 573 dummy_clk = cqspi_calc_dummy(op); 574 if (dummy_clk > CQSPI_DUMMY_CLKS_MAX) 575 return -EOPNOTSUPP; 576 577 if (dummy_clk) 578 reg |= (dummy_clk & CQSPI_REG_CMDCTRL_DUMMY_MASK) 579 << CQSPI_REG_CMDCTRL_DUMMY_LSB; 580 581 reg |= BIT(CQSPI_REG_CMDCTRL_RD_EN_LSB); 582 583 /* 0 means 1 byte. */ 584 reg |= (((n_rx - 1) & CQSPI_REG_CMDCTRL_RD_BYTES_MASK) 585 << CQSPI_REG_CMDCTRL_RD_BYTES_LSB); 586 587 /* setup ADDR BIT field */ 588 if (op->addr.nbytes) { 589 reg |= BIT(CQSPI_REG_CMDCTRL_ADDR_EN_LSB); 590 reg |= ((op->addr.nbytes - 1) & 591 CQSPI_REG_CMDCTRL_ADD_BYTES_MASK) 592 << CQSPI_REG_CMDCTRL_ADD_BYTES_LSB; 593 594 writel(op->addr.val, reg_base + CQSPI_REG_CMDADDRESS); 595 } 596 597 status = cqspi_exec_flash_cmd(cqspi, reg); 598 if (status) 599 return status; 600 601 reg = readl(reg_base + CQSPI_REG_CMDREADDATALOWER); 602 603 /* Put the read value into rx_buf */ 604 read_len = (n_rx > 4) ? 4 : n_rx; 605 memcpy(rxbuf, ®, read_len); 606 rxbuf += read_len; 607 608 if (n_rx > 4) { 609 reg = readl(reg_base + CQSPI_REG_CMDREADDATAUPPER); 610 611 read_len = n_rx - read_len; 612 memcpy(rxbuf, ®, read_len); 613 } 614 615 /* Reset CMD_CTRL Reg once command read completes */ 616 writel(0, reg_base + CQSPI_REG_CMDCTRL); 617 618 return 0; 619 } 620 621 static int cqspi_command_write(struct cqspi_flash_pdata *f_pdata, 622 const struct spi_mem_op *op) 623 { 624 struct cqspi_st *cqspi = f_pdata->cqspi; 625 void __iomem *reg_base = cqspi->iobase; 626 u8 opcode; 627 const u8 *txbuf = op->data.buf.out; 628 size_t n_tx = op->data.nbytes; 629 unsigned int reg; 630 unsigned int data; 631 size_t write_len; 632 int ret; 633 634 ret = cqspi_enable_dtr(f_pdata, op, CQSPI_REG_OP_EXT_STIG_LSB); 635 if (ret) 636 return ret; 637 638 if (n_tx > CQSPI_STIG_DATA_LEN_MAX || (n_tx && !txbuf)) { 639 dev_err(&cqspi->pdev->dev, 640 "Invalid input argument, cmdlen %zu txbuf 0x%p\n", 641 n_tx, txbuf); 642 return -EINVAL; 643 } 644 645 reg = cqspi_calc_rdreg(op); 646 writel(reg, reg_base + CQSPI_REG_RD_INSTR); 647 648 if (op->cmd.dtr) 649 opcode = op->cmd.opcode >> 8; 650 else 651 opcode = op->cmd.opcode; 652 653 reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB; 654 655 if (op->addr.nbytes) { 656 reg |= BIT(CQSPI_REG_CMDCTRL_ADDR_EN_LSB); 657 reg |= ((op->addr.nbytes - 1) & 658 CQSPI_REG_CMDCTRL_ADD_BYTES_MASK) 659 << CQSPI_REG_CMDCTRL_ADD_BYTES_LSB; 660 661 writel(op->addr.val, reg_base + CQSPI_REG_CMDADDRESS); 662 } 663 664 if (n_tx) { 665 reg |= BIT(CQSPI_REG_CMDCTRL_WR_EN_LSB); 666 reg |= ((n_tx - 1) & CQSPI_REG_CMDCTRL_WR_BYTES_MASK) 667 << CQSPI_REG_CMDCTRL_WR_BYTES_LSB; 668 data = 0; 669 write_len = (n_tx > 4) ? 4 : n_tx; 670 memcpy(&data, txbuf, write_len); 671 txbuf += write_len; 672 writel(data, reg_base + CQSPI_REG_CMDWRITEDATALOWER); 673 674 if (n_tx > 4) { 675 data = 0; 676 write_len = n_tx - 4; 677 memcpy(&data, txbuf, write_len); 678 writel(data, reg_base + CQSPI_REG_CMDWRITEDATAUPPER); 679 } 680 } 681 682 ret = cqspi_exec_flash_cmd(cqspi, reg); 683 684 /* Reset CMD_CTRL Reg once command write completes */ 685 writel(0, reg_base + CQSPI_REG_CMDCTRL); 686 687 return ret; 688 } 689 690 static int cqspi_read_setup(struct cqspi_flash_pdata *f_pdata, 691 const struct spi_mem_op *op) 692 { 693 struct cqspi_st *cqspi = f_pdata->cqspi; 694 void __iomem *reg_base = cqspi->iobase; 695 unsigned int dummy_clk = 0; 696 unsigned int reg; 697 int ret; 698 u8 opcode; 699 700 ret = cqspi_enable_dtr(f_pdata, op, CQSPI_REG_OP_EXT_READ_LSB); 701 if (ret) 702 return ret; 703 704 if (op->cmd.dtr) 705 opcode = op->cmd.opcode >> 8; 706 else 707 opcode = op->cmd.opcode; 708 709 reg = opcode << CQSPI_REG_RD_INSTR_OPCODE_LSB; 710 reg |= cqspi_calc_rdreg(op); 711 712 /* Setup dummy clock cycles */ 713 dummy_clk = cqspi_calc_dummy(op); 714 715 if (dummy_clk > CQSPI_DUMMY_CLKS_MAX) 716 return -EOPNOTSUPP; 717 718 if (dummy_clk) 719 reg |= (dummy_clk & CQSPI_REG_RD_INSTR_DUMMY_MASK) 720 << CQSPI_REG_RD_INSTR_DUMMY_LSB; 721 722 writel(reg, reg_base + CQSPI_REG_RD_INSTR); 723 724 /* Set address width */ 725 reg = readl(reg_base + CQSPI_REG_SIZE); 726 reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK; 727 reg |= (op->addr.nbytes - 1); 728 writel(reg, reg_base + CQSPI_REG_SIZE); 729 readl(reg_base + CQSPI_REG_SIZE); /* Flush posted write. */ 730 return 0; 731 } 732 733 static int cqspi_indirect_read_execute(struct cqspi_flash_pdata *f_pdata, 734 u8 *rxbuf, loff_t from_addr, 735 const size_t n_rx) 736 { 737 struct cqspi_st *cqspi = f_pdata->cqspi; 738 bool use_irq = !(cqspi->ddata && cqspi->ddata->quirks & CQSPI_RD_NO_IRQ); 739 struct device *dev = &cqspi->pdev->dev; 740 void __iomem *reg_base = cqspi->iobase; 741 void __iomem *ahb_base = cqspi->ahb_base; 742 unsigned int remaining = n_rx; 743 unsigned int mod_bytes = n_rx % 4; 744 unsigned int bytes_to_read = 0; 745 u8 *rxbuf_end = rxbuf + n_rx; 746 int ret = 0; 747 748 if (!refcount_read(&cqspi->refcount)) 749 return -ENODEV; 750 751 writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR); 752 writel(remaining, reg_base + CQSPI_REG_INDIRECTRDBYTES); 753 754 /* Clear all interrupts. */ 755 writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS); 756 757 /* 758 * On SoCFPGA platform reading the SRAM is slow due to 759 * hardware limitation and causing read interrupt storm to CPU, 760 * so enabling only watermark interrupt to disable all read 761 * interrupts later as we want to run "bytes to read" loop with 762 * all the read interrupts disabled for max performance. 763 */ 764 765 if (use_irq && cqspi->slow_sram) 766 writel(CQSPI_IRQ_MASK_RD_SLOW_SRAM, reg_base + CQSPI_REG_IRQMASK); 767 else if (use_irq) 768 writel(CQSPI_IRQ_MASK_RD, reg_base + CQSPI_REG_IRQMASK); 769 else 770 writel(0, reg_base + CQSPI_REG_IRQMASK); 771 772 reinit_completion(&cqspi->transfer_complete); 773 writel(CQSPI_REG_INDIRECTRD_START_MASK, 774 reg_base + CQSPI_REG_INDIRECTRD); 775 readl(reg_base + CQSPI_REG_INDIRECTRD); /* Flush posted write. */ 776 777 while (remaining > 0) { 778 ret = 0; 779 if (use_irq && 780 !wait_for_completion_timeout(&cqspi->transfer_complete, 781 msecs_to_jiffies(CQSPI_READ_TIMEOUT_MS))) 782 ret = -ETIMEDOUT; 783 784 /* 785 * Prevent lost interrupt and race condition by reinitializing early. 786 * A spurious wakeup and another wait cycle can occur here, 787 * which is preferable to waiting until timeout if interrupt is lost. 788 */ 789 if (use_irq) 790 reinit_completion(&cqspi->transfer_complete); 791 792 bytes_to_read = cqspi_get_rd_sram_level(cqspi); 793 794 if (ret && bytes_to_read == 0) { 795 dev_err(dev, "Indirect read timeout, no bytes\n"); 796 goto failrd; 797 } 798 799 while (bytes_to_read != 0) { 800 unsigned int word_remain = round_down(remaining, 4); 801 802 bytes_to_read *= cqspi->fifo_width; 803 bytes_to_read = bytes_to_read > remaining ? 804 remaining : bytes_to_read; 805 bytes_to_read = round_down(bytes_to_read, 4); 806 /* Read 4 byte word chunks then single bytes */ 807 if (bytes_to_read) { 808 ioread32_rep(ahb_base, rxbuf, 809 (bytes_to_read / 4)); 810 } else if (!word_remain && mod_bytes) { 811 unsigned int temp = ioread32(ahb_base); 812 813 bytes_to_read = mod_bytes; 814 memcpy(rxbuf, &temp, min((unsigned int) 815 (rxbuf_end - rxbuf), 816 bytes_to_read)); 817 } 818 rxbuf += bytes_to_read; 819 remaining -= bytes_to_read; 820 bytes_to_read = cqspi_get_rd_sram_level(cqspi); 821 } 822 } 823 824 /* Check indirect done status */ 825 ret = cqspi_wait_for_bit(cqspi->ddata, reg_base + CQSPI_REG_INDIRECTRD, 826 CQSPI_REG_INDIRECTRD_DONE_MASK, 0, true); 827 if (ret) { 828 dev_err(dev, "Indirect read completion error (%i)\n", ret); 829 goto failrd; 830 } 831 832 /* Disable interrupt */ 833 writel(0, reg_base + CQSPI_REG_IRQMASK); 834 835 /* Clear indirect completion status */ 836 writel(CQSPI_REG_INDIRECTRD_DONE_MASK, reg_base + CQSPI_REG_INDIRECTRD); 837 838 return 0; 839 840 failrd: 841 /* Disable interrupt */ 842 writel(0, reg_base + CQSPI_REG_IRQMASK); 843 844 /* Cancel the indirect read */ 845 writel(CQSPI_REG_INDIRECTRD_CANCEL_MASK, 846 reg_base + CQSPI_REG_INDIRECTRD); 847 return ret; 848 } 849 850 static void cqspi_device_reset(struct cqspi_st *cqspi) 851 { 852 u32 reg; 853 854 reg = readl(cqspi->iobase + CQSPI_REG_CONFIG); 855 reg |= CQSPI_REG_CONFIG_RESET_CFG_FLD_MASK; 856 writel(reg, cqspi->iobase + CQSPI_REG_CONFIG); 857 /* 858 * NOTE: Delay timing implementation is derived from 859 * spi_nor_hw_reset() 860 */ 861 writel(reg & ~CQSPI_REG_CONFIG_RESET_PIN_FLD_MASK, cqspi->iobase + CQSPI_REG_CONFIG); 862 usleep_range(1, 5); 863 writel(reg | CQSPI_REG_CONFIG_RESET_PIN_FLD_MASK, cqspi->iobase + CQSPI_REG_CONFIG); 864 usleep_range(100, 150); 865 writel(reg & ~CQSPI_REG_CONFIG_RESET_PIN_FLD_MASK, cqspi->iobase + CQSPI_REG_CONFIG); 866 usleep_range(1000, 1200); 867 } 868 869 static void cqspi_controller_enable(struct cqspi_st *cqspi, bool enable) 870 { 871 void __iomem *reg_base = cqspi->iobase; 872 unsigned int reg; 873 874 reg = readl(reg_base + CQSPI_REG_CONFIG); 875 876 if (enable) 877 reg |= CQSPI_REG_CONFIG_ENABLE_MASK; 878 else 879 reg &= ~CQSPI_REG_CONFIG_ENABLE_MASK; 880 881 writel(reg, reg_base + CQSPI_REG_CONFIG); 882 } 883 884 static int cqspi_versal_indirect_read_dma(struct cqspi_flash_pdata *f_pdata, 885 u_char *rxbuf, loff_t from_addr, 886 size_t n_rx) 887 { 888 struct cqspi_st *cqspi = f_pdata->cqspi; 889 struct device *dev = &cqspi->pdev->dev; 890 void __iomem *reg_base = cqspi->iobase; 891 u32 reg, bytes_to_dma; 892 loff_t addr = from_addr; 893 void *buf = rxbuf; 894 dma_addr_t dma_addr; 895 u8 bytes_rem; 896 int ret = 0; 897 898 bytes_rem = n_rx % 4; 899 bytes_to_dma = (n_rx - bytes_rem); 900 901 if (!bytes_to_dma) 902 goto nondmard; 903 904 ret = zynqmp_pm_ospi_mux_select(cqspi->pd_dev_id, PM_OSPI_MUX_SEL_DMA); 905 if (ret) 906 return ret; 907 908 cqspi_controller_enable(cqspi, 0); 909 910 reg = readl(cqspi->iobase + CQSPI_REG_CONFIG); 911 reg |= CQSPI_REG_CONFIG_DMA_MASK; 912 writel(reg, cqspi->iobase + CQSPI_REG_CONFIG); 913 914 cqspi_controller_enable(cqspi, 1); 915 916 dma_addr = dma_map_single(dev, rxbuf, bytes_to_dma, DMA_FROM_DEVICE); 917 if (dma_mapping_error(dev, dma_addr)) { 918 dev_err(dev, "dma mapping failed\n"); 919 return -ENOMEM; 920 } 921 922 writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR); 923 writel(bytes_to_dma, reg_base + CQSPI_REG_INDIRECTRDBYTES); 924 writel(CQSPI_REG_VERSAL_ADDRRANGE_WIDTH_VAL, 925 reg_base + CQSPI_REG_INDTRIG_ADDRRANGE); 926 927 /* Clear all interrupts. */ 928 writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS); 929 930 /* Enable DMA done interrupt */ 931 writel(CQSPI_REG_VERSAL_DMA_DST_DONE_MASK, 932 reg_base + CQSPI_REG_VERSAL_DMA_DST_I_EN); 933 934 /* Default DMA periph configuration */ 935 writel(CQSPI_REG_VERSAL_DMA_VAL, reg_base + CQSPI_REG_DMA); 936 937 /* Configure DMA Dst address */ 938 writel(lower_32_bits(dma_addr), 939 reg_base + CQSPI_REG_VERSAL_DMA_DST_ADDR); 940 writel(upper_32_bits(dma_addr), 941 reg_base + CQSPI_REG_VERSAL_DMA_DST_ADDR_MSB); 942 943 /* Configure DMA Src address */ 944 writel(cqspi->trigger_address, reg_base + 945 CQSPI_REG_VERSAL_DMA_SRC_ADDR); 946 947 /* Set DMA destination size */ 948 writel(bytes_to_dma, reg_base + CQSPI_REG_VERSAL_DMA_DST_SIZE); 949 950 /* Set DMA destination control */ 951 writel(CQSPI_REG_VERSAL_DMA_DST_CTRL_VAL, 952 reg_base + CQSPI_REG_VERSAL_DMA_DST_CTRL); 953 954 writel(CQSPI_REG_INDIRECTRD_START_MASK, 955 reg_base + CQSPI_REG_INDIRECTRD); 956 957 reinit_completion(&cqspi->transfer_complete); 958 959 if (!wait_for_completion_timeout(&cqspi->transfer_complete, 960 msecs_to_jiffies(max_t(size_t, bytes_to_dma, 500)))) { 961 ret = -ETIMEDOUT; 962 goto failrd; 963 } 964 965 /* Disable DMA interrupt */ 966 writel(0x0, cqspi->iobase + CQSPI_REG_VERSAL_DMA_DST_I_DIS); 967 968 /* Clear indirect completion status */ 969 writel(CQSPI_REG_INDIRECTRD_DONE_MASK, 970 cqspi->iobase + CQSPI_REG_INDIRECTRD); 971 dma_unmap_single(dev, dma_addr, bytes_to_dma, DMA_FROM_DEVICE); 972 973 cqspi_controller_enable(cqspi, 0); 974 975 reg = readl(cqspi->iobase + CQSPI_REG_CONFIG); 976 reg &= ~CQSPI_REG_CONFIG_DMA_MASK; 977 writel(reg, cqspi->iobase + CQSPI_REG_CONFIG); 978 979 cqspi_controller_enable(cqspi, 1); 980 981 ret = zynqmp_pm_ospi_mux_select(cqspi->pd_dev_id, 982 PM_OSPI_MUX_SEL_LINEAR); 983 if (ret) 984 return ret; 985 986 nondmard: 987 if (bytes_rem) { 988 addr += bytes_to_dma; 989 buf += bytes_to_dma; 990 ret = cqspi_indirect_read_execute(f_pdata, buf, addr, 991 bytes_rem); 992 if (ret) 993 return ret; 994 } 995 996 return 0; 997 998 failrd: 999 /* Disable DMA interrupt */ 1000 writel(0x0, reg_base + CQSPI_REG_VERSAL_DMA_DST_I_DIS); 1001 1002 /* Cancel the indirect read */ 1003 writel(CQSPI_REG_INDIRECTWR_CANCEL_MASK, 1004 reg_base + CQSPI_REG_INDIRECTRD); 1005 1006 dma_unmap_single(dev, dma_addr, bytes_to_dma, DMA_FROM_DEVICE); 1007 1008 reg = readl(cqspi->iobase + CQSPI_REG_CONFIG); 1009 reg &= ~CQSPI_REG_CONFIG_DMA_MASK; 1010 writel(reg, cqspi->iobase + CQSPI_REG_CONFIG); 1011 1012 zynqmp_pm_ospi_mux_select(cqspi->pd_dev_id, PM_OSPI_MUX_SEL_LINEAR); 1013 1014 return ret; 1015 } 1016 1017 static int cqspi_write_setup(struct cqspi_flash_pdata *f_pdata, 1018 const struct spi_mem_op *op) 1019 { 1020 unsigned int reg; 1021 int ret; 1022 struct cqspi_st *cqspi = f_pdata->cqspi; 1023 void __iomem *reg_base = cqspi->iobase; 1024 u8 opcode; 1025 1026 ret = cqspi_enable_dtr(f_pdata, op, CQSPI_REG_OP_EXT_WRITE_LSB); 1027 if (ret) 1028 return ret; 1029 1030 if (op->cmd.dtr) 1031 opcode = op->cmd.opcode >> 8; 1032 else 1033 opcode = op->cmd.opcode; 1034 1035 /* Set opcode. */ 1036 reg = opcode << CQSPI_REG_WR_INSTR_OPCODE_LSB; 1037 reg |= CQSPI_OP_WIDTH(op->data) << CQSPI_REG_WR_INSTR_TYPE_DATA_LSB; 1038 reg |= CQSPI_OP_WIDTH(op->addr) << CQSPI_REG_WR_INSTR_TYPE_ADDR_LSB; 1039 writel(reg, reg_base + CQSPI_REG_WR_INSTR); 1040 reg = cqspi_calc_rdreg(op); 1041 writel(reg, reg_base + CQSPI_REG_RD_INSTR); 1042 1043 /* 1044 * SPI NAND flashes require the address of the status register to be 1045 * passed in the Read SR command. Also, some SPI NOR flashes like the 1046 * cypress Semper flash expect a 4-byte dummy address in the Read SR 1047 * command in DTR mode. 1048 * 1049 * But this controller does not support address phase in the Read SR 1050 * command when doing auto-HW polling. So, disable write completion 1051 * polling on the controller's side. spinand and spi-nor will take 1052 * care of polling the status register. 1053 */ 1054 if (cqspi->wr_completion) { 1055 reg = readl(reg_base + CQSPI_REG_WR_COMPLETION_CTRL); 1056 reg |= CQSPI_REG_WR_DISABLE_AUTO_POLL; 1057 writel(reg, reg_base + CQSPI_REG_WR_COMPLETION_CTRL); 1058 /* 1059 * DAC mode require auto polling as flash needs to be polled 1060 * for write completion in case of bubble in SPI transaction 1061 * due to slow CPU/DMA master. 1062 */ 1063 cqspi->use_direct_mode_wr = false; 1064 } 1065 1066 reg = readl(reg_base + CQSPI_REG_SIZE); 1067 reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK; 1068 reg |= (op->addr.nbytes - 1); 1069 writel(reg, reg_base + CQSPI_REG_SIZE); 1070 readl(reg_base + CQSPI_REG_SIZE); /* Flush posted write. */ 1071 return 0; 1072 } 1073 1074 static int cqspi_indirect_write_execute(struct cqspi_flash_pdata *f_pdata, 1075 loff_t to_addr, const u8 *txbuf, 1076 const size_t n_tx) 1077 { 1078 struct cqspi_st *cqspi = f_pdata->cqspi; 1079 struct device *dev = &cqspi->pdev->dev; 1080 void __iomem *reg_base = cqspi->iobase; 1081 unsigned int remaining = n_tx; 1082 unsigned int write_bytes; 1083 int ret; 1084 1085 if (!refcount_read(&cqspi->refcount)) 1086 return -ENODEV; 1087 1088 writel(to_addr, reg_base + CQSPI_REG_INDIRECTWRSTARTADDR); 1089 writel(remaining, reg_base + CQSPI_REG_INDIRECTWRBYTES); 1090 1091 /* Clear all interrupts. */ 1092 writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS); 1093 1094 writel(CQSPI_IRQ_MASK_WR, reg_base + CQSPI_REG_IRQMASK); 1095 1096 reinit_completion(&cqspi->transfer_complete); 1097 writel(CQSPI_REG_INDIRECTWR_START_MASK, 1098 reg_base + CQSPI_REG_INDIRECTWR); 1099 readl(reg_base + CQSPI_REG_INDIRECTWR); /* Flush posted write. */ 1100 1101 /* 1102 * As per 66AK2G02 TRM SPRUHY8F section 11.15.5.3 Indirect Access 1103 * Controller programming sequence, couple of cycles of 1104 * QSPI_REF_CLK delay is required for the above bit to 1105 * be internally synchronized by the QSPI module. Provide 5 1106 * cycles of delay. 1107 */ 1108 if (cqspi->wr_delay) 1109 ndelay(cqspi->wr_delay); 1110 1111 /* 1112 * If a hazard exists between the APB and AHB interfaces, perform a 1113 * dummy readback from the controller to ensure synchronization. 1114 */ 1115 if (cqspi->apb_ahb_hazard) 1116 readl(reg_base + CQSPI_REG_INDIRECTWR); 1117 1118 while (remaining > 0) { 1119 size_t write_words, mod_bytes; 1120 1121 write_bytes = remaining; 1122 write_words = write_bytes / 4; 1123 mod_bytes = write_bytes % 4; 1124 /* Write 4 bytes at a time then single bytes. */ 1125 if (write_words) { 1126 iowrite32_rep(cqspi->ahb_base, txbuf, write_words); 1127 txbuf += (write_words * 4); 1128 } 1129 if (mod_bytes) { 1130 unsigned int temp = 0xFFFFFFFF; 1131 1132 memcpy(&temp, txbuf, mod_bytes); 1133 iowrite32(temp, cqspi->ahb_base); 1134 txbuf += mod_bytes; 1135 } 1136 1137 if (!wait_for_completion_timeout(&cqspi->transfer_complete, 1138 msecs_to_jiffies(CQSPI_TIMEOUT_MS))) { 1139 dev_err(dev, "Indirect write timeout\n"); 1140 ret = -ETIMEDOUT; 1141 goto failwr; 1142 } 1143 1144 remaining -= write_bytes; 1145 1146 if (remaining > 0) 1147 reinit_completion(&cqspi->transfer_complete); 1148 } 1149 1150 /* Check indirect done status */ 1151 ret = cqspi_wait_for_bit(cqspi->ddata, reg_base + CQSPI_REG_INDIRECTWR, 1152 CQSPI_REG_INDIRECTWR_DONE_MASK, 0, false); 1153 if (ret) { 1154 dev_err(dev, "Indirect write completion error (%i)\n", ret); 1155 goto failwr; 1156 } 1157 1158 /* Disable interrupt. */ 1159 writel(0, reg_base + CQSPI_REG_IRQMASK); 1160 1161 /* Clear indirect completion status */ 1162 writel(CQSPI_REG_INDIRECTWR_DONE_MASK, reg_base + CQSPI_REG_INDIRECTWR); 1163 1164 cqspi_wait_idle(cqspi); 1165 1166 return 0; 1167 1168 failwr: 1169 /* Disable interrupt. */ 1170 writel(0, reg_base + CQSPI_REG_IRQMASK); 1171 1172 /* Cancel the indirect write */ 1173 writel(CQSPI_REG_INDIRECTWR_CANCEL_MASK, 1174 reg_base + CQSPI_REG_INDIRECTWR); 1175 return ret; 1176 } 1177 1178 static void cqspi_chipselect(struct cqspi_flash_pdata *f_pdata) 1179 { 1180 struct cqspi_st *cqspi = f_pdata->cqspi; 1181 void __iomem *reg_base = cqspi->iobase; 1182 unsigned int chip_select = f_pdata->cs; 1183 unsigned int reg; 1184 1185 reg = readl(reg_base + CQSPI_REG_CONFIG); 1186 if (cqspi->is_decoded_cs) { 1187 reg |= CQSPI_REG_CONFIG_DECODE_MASK; 1188 } else { 1189 reg &= ~CQSPI_REG_CONFIG_DECODE_MASK; 1190 1191 /* Convert CS if without decoder. 1192 * CS0 to 4b'1110 1193 * CS1 to 4b'1101 1194 * CS2 to 4b'1011 1195 * CS3 to 4b'0111 1196 */ 1197 chip_select = 0xF & ~BIT(chip_select); 1198 } 1199 1200 reg &= ~(CQSPI_REG_CONFIG_CHIPSELECT_MASK 1201 << CQSPI_REG_CONFIG_CHIPSELECT_LSB); 1202 reg |= (chip_select & CQSPI_REG_CONFIG_CHIPSELECT_MASK) 1203 << CQSPI_REG_CONFIG_CHIPSELECT_LSB; 1204 writel(reg, reg_base + CQSPI_REG_CONFIG); 1205 } 1206 1207 static unsigned int calculate_ticks_for_ns(const unsigned int ref_clk_hz, 1208 const unsigned int ns_val) 1209 { 1210 unsigned int ticks; 1211 1212 ticks = ref_clk_hz / 1000; /* kHz */ 1213 ticks = DIV_ROUND_UP(ticks * ns_val, 1000000); 1214 1215 return ticks; 1216 } 1217 1218 static void cqspi_delay(struct cqspi_flash_pdata *f_pdata) 1219 { 1220 struct cqspi_st *cqspi = f_pdata->cqspi; 1221 void __iomem *iobase = cqspi->iobase; 1222 const unsigned int ref_clk_hz = cqspi->master_ref_clk_hz; 1223 unsigned int tshsl, tchsh, tslch, tsd2d; 1224 unsigned int reg; 1225 unsigned int tsclk; 1226 1227 /* calculate the number of ref ticks for one sclk tick */ 1228 tsclk = DIV_ROUND_UP(ref_clk_hz, cqspi->sclk); 1229 1230 tshsl = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tshsl_ns); 1231 /* this particular value must be at least one sclk */ 1232 if (tshsl < tsclk) 1233 tshsl = tsclk; 1234 1235 tchsh = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tchsh_ns); 1236 tslch = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tslch_ns); 1237 tsd2d = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tsd2d_ns); 1238 1239 reg = (tshsl & CQSPI_REG_DELAY_TSHSL_MASK) 1240 << CQSPI_REG_DELAY_TSHSL_LSB; 1241 reg |= (tchsh & CQSPI_REG_DELAY_TCHSH_MASK) 1242 << CQSPI_REG_DELAY_TCHSH_LSB; 1243 reg |= (tslch & CQSPI_REG_DELAY_TSLCH_MASK) 1244 << CQSPI_REG_DELAY_TSLCH_LSB; 1245 reg |= (tsd2d & CQSPI_REG_DELAY_TSD2D_MASK) 1246 << CQSPI_REG_DELAY_TSD2D_LSB; 1247 writel(reg, iobase + CQSPI_REG_DELAY); 1248 } 1249 1250 static void cqspi_config_baudrate_div(struct cqspi_st *cqspi) 1251 { 1252 const unsigned int ref_clk_hz = cqspi->master_ref_clk_hz; 1253 void __iomem *reg_base = cqspi->iobase; 1254 u32 reg, div; 1255 1256 /* Recalculate the baudrate divisor based on QSPI specification. */ 1257 div = DIV_ROUND_UP(ref_clk_hz, 2 * cqspi->sclk) - 1; 1258 1259 /* Maximum baud divisor */ 1260 if (div > CQSPI_REG_CONFIG_BAUD_MASK) { 1261 div = CQSPI_REG_CONFIG_BAUD_MASK; 1262 dev_warn(&cqspi->pdev->dev, 1263 "Unable to adjust clock <= %d hz. Reduced to %d hz\n", 1264 cqspi->sclk, ref_clk_hz/((div+1)*2)); 1265 } 1266 1267 reg = readl(reg_base + CQSPI_REG_CONFIG); 1268 reg &= ~(CQSPI_REG_CONFIG_BAUD_MASK << CQSPI_REG_CONFIG_BAUD_LSB); 1269 reg |= div << CQSPI_REG_CONFIG_BAUD_LSB; 1270 writel(reg, reg_base + CQSPI_REG_CONFIG); 1271 } 1272 1273 static void cqspi_readdata_capture(struct cqspi_st *cqspi, 1274 const bool bypass, 1275 const unsigned int delay) 1276 { 1277 void __iomem *reg_base = cqspi->iobase; 1278 unsigned int reg; 1279 1280 reg = readl(reg_base + CQSPI_REG_READCAPTURE); 1281 1282 if (bypass) 1283 reg |= BIT(CQSPI_REG_READCAPTURE_BYPASS_LSB); 1284 else 1285 reg &= ~BIT(CQSPI_REG_READCAPTURE_BYPASS_LSB); 1286 1287 reg &= ~(CQSPI_REG_READCAPTURE_DELAY_MASK 1288 << CQSPI_REG_READCAPTURE_DELAY_LSB); 1289 1290 reg |= (delay & CQSPI_REG_READCAPTURE_DELAY_MASK) 1291 << CQSPI_REG_READCAPTURE_DELAY_LSB; 1292 1293 writel(reg, reg_base + CQSPI_REG_READCAPTURE); 1294 } 1295 1296 static void cqspi_configure(struct cqspi_flash_pdata *f_pdata, 1297 unsigned long sclk) 1298 { 1299 struct cqspi_st *cqspi = f_pdata->cqspi; 1300 int switch_cs = (cqspi->current_cs != f_pdata->cs); 1301 int switch_ck = (cqspi->sclk != sclk); 1302 1303 if (switch_cs || switch_ck) 1304 cqspi_controller_enable(cqspi, 0); 1305 1306 /* Switch chip select. */ 1307 if (switch_cs) { 1308 cqspi->current_cs = f_pdata->cs; 1309 cqspi_chipselect(f_pdata); 1310 } 1311 1312 /* Setup baudrate divisor and delays */ 1313 if (switch_ck) { 1314 cqspi->sclk = sclk; 1315 cqspi_config_baudrate_div(cqspi); 1316 cqspi_delay(f_pdata); 1317 cqspi_readdata_capture(cqspi, !cqspi->rclk_en, 1318 f_pdata->read_delay); 1319 } 1320 1321 if (switch_cs || switch_ck) 1322 cqspi_controller_enable(cqspi, 1); 1323 } 1324 1325 static ssize_t cqspi_write(struct cqspi_flash_pdata *f_pdata, 1326 const struct spi_mem_op *op) 1327 { 1328 struct cqspi_st *cqspi = f_pdata->cqspi; 1329 loff_t to = op->addr.val; 1330 size_t len = op->data.nbytes; 1331 const u_char *buf = op->data.buf.out; 1332 int ret; 1333 1334 ret = cqspi_write_setup(f_pdata, op); 1335 if (ret) 1336 return ret; 1337 1338 /* 1339 * Some flashes like the Cypress Semper flash expect a dummy 4-byte 1340 * address (all 0s) with the read status register command in DTR mode. 1341 * But this controller does not support sending dummy address bytes to 1342 * the flash when it is polling the write completion register in DTR 1343 * mode. So, we can not use direct mode when in DTR mode for writing 1344 * data. 1345 */ 1346 if ((!op->cmd.dtr && cqspi->use_direct_mode && 1347 cqspi->use_direct_mode_wr && ((to + len) <= cqspi->ahb_size)) || 1348 (cqspi->ddata && cqspi->ddata->quirks & CQSPI_NO_INDIRECT_MODE)) { 1349 memcpy_toio(cqspi->ahb_base + to, buf, len); 1350 return cqspi_wait_idle(cqspi); 1351 } 1352 1353 return cqspi_indirect_write_execute(f_pdata, to, buf, len); 1354 } 1355 1356 static void cqspi_rx_dma_callback(void *param) 1357 { 1358 struct cqspi_st *cqspi = param; 1359 1360 complete(&cqspi->rx_dma_complete); 1361 } 1362 1363 static int cqspi_direct_read_execute(struct cqspi_flash_pdata *f_pdata, 1364 u_char *buf, loff_t from, size_t len) 1365 { 1366 struct cqspi_st *cqspi = f_pdata->cqspi; 1367 struct device *dev = &cqspi->pdev->dev; 1368 enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT; 1369 dma_addr_t dma_src = (dma_addr_t)cqspi->mmap_phys_base + from; 1370 int ret = 0; 1371 struct dma_async_tx_descriptor *tx; 1372 dma_cookie_t cookie; 1373 dma_addr_t dma_dst; 1374 struct device *ddev; 1375 1376 if (!cqspi->rx_chan || !virt_addr_valid(buf)) { 1377 memcpy_fromio(buf, cqspi->ahb_base + from, len); 1378 return 0; 1379 } 1380 1381 ddev = cqspi->rx_chan->device->dev; 1382 dma_dst = dma_map_single(ddev, buf, len, DMA_FROM_DEVICE); 1383 if (dma_mapping_error(ddev, dma_dst)) { 1384 dev_err(dev, "dma mapping failed\n"); 1385 return -ENOMEM; 1386 } 1387 tx = dmaengine_prep_dma_memcpy(cqspi->rx_chan, dma_dst, dma_src, 1388 len, flags); 1389 if (!tx) { 1390 dev_err(dev, "device_prep_dma_memcpy error\n"); 1391 ret = -EIO; 1392 goto err_unmap; 1393 } 1394 1395 tx->callback = cqspi_rx_dma_callback; 1396 tx->callback_param = cqspi; 1397 cookie = tx->tx_submit(tx); 1398 reinit_completion(&cqspi->rx_dma_complete); 1399 1400 ret = dma_submit_error(cookie); 1401 if (ret) { 1402 dev_err(dev, "dma_submit_error %d\n", cookie); 1403 ret = -EIO; 1404 goto err_unmap; 1405 } 1406 1407 dma_async_issue_pending(cqspi->rx_chan); 1408 if (!wait_for_completion_timeout(&cqspi->rx_dma_complete, 1409 msecs_to_jiffies(max_t(size_t, len, 500)))) { 1410 dmaengine_terminate_sync(cqspi->rx_chan); 1411 dev_err(dev, "DMA wait_for_completion_timeout\n"); 1412 ret = -ETIMEDOUT; 1413 goto err_unmap; 1414 } 1415 1416 err_unmap: 1417 dma_unmap_single(ddev, dma_dst, len, DMA_FROM_DEVICE); 1418 1419 return ret; 1420 } 1421 1422 static ssize_t cqspi_read(struct cqspi_flash_pdata *f_pdata, 1423 const struct spi_mem_op *op) 1424 { 1425 struct cqspi_st *cqspi = f_pdata->cqspi; 1426 const struct cqspi_driver_platdata *ddata = cqspi->ddata; 1427 loff_t from = op->addr.val; 1428 size_t len = op->data.nbytes; 1429 u_char *buf = op->data.buf.in; 1430 u64 dma_align = (u64)(uintptr_t)buf; 1431 int ret; 1432 1433 ret = cqspi_read_setup(f_pdata, op); 1434 if (ret) 1435 return ret; 1436 1437 if ((cqspi->use_direct_mode && ((from + len) <= cqspi->ahb_size)) || 1438 (cqspi->ddata && cqspi->ddata->quirks & CQSPI_NO_INDIRECT_MODE)) 1439 return cqspi_direct_read_execute(f_pdata, buf, from, len); 1440 1441 if (cqspi->use_dma_read && ddata && ddata->indirect_read_dma && 1442 virt_addr_valid(buf) && ((dma_align & CQSPI_DMA_UNALIGN) == 0)) 1443 return ddata->indirect_read_dma(f_pdata, buf, from, len); 1444 1445 return cqspi_indirect_read_execute(f_pdata, buf, from, len); 1446 } 1447 1448 static int cqspi_mem_process(struct spi_mem *mem, const struct spi_mem_op *op) 1449 { 1450 struct cqspi_st *cqspi = spi_controller_get_devdata(mem->spi->controller); 1451 struct cqspi_flash_pdata *f_pdata; 1452 1453 f_pdata = &cqspi->f_pdata[spi_get_chipselect(mem->spi, 0)]; 1454 cqspi_configure(f_pdata, op->max_freq); 1455 1456 if (op->data.dir == SPI_MEM_DATA_IN && op->data.buf.in) { 1457 /* 1458 * Performing reads in DAC mode forces to read minimum 4 bytes 1459 * which is unsupported on some flash devices during register 1460 * reads, prefer STIG mode for such small reads. 1461 */ 1462 if (!op->addr.nbytes || 1463 (op->data.nbytes <= CQSPI_STIG_DATA_LEN_MAX && 1464 !cqspi->disable_stig_mode)) 1465 return cqspi_command_read(f_pdata, op); 1466 1467 return cqspi_read(f_pdata, op); 1468 } 1469 1470 if (!op->addr.nbytes || !op->data.buf.out) 1471 return cqspi_command_write(f_pdata, op); 1472 1473 return cqspi_write(f_pdata, op); 1474 } 1475 1476 static int cqspi_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op) 1477 { 1478 int ret; 1479 struct cqspi_st *cqspi = spi_controller_get_devdata(mem->spi->controller); 1480 struct device *dev = &cqspi->pdev->dev; 1481 const struct cqspi_driver_platdata *ddata = of_device_get_match_data(dev); 1482 1483 if (refcount_read(&cqspi->inflight_ops) == 0) 1484 return -ENODEV; 1485 1486 if (!refcount_read(&cqspi->refcount)) 1487 return -EBUSY; 1488 1489 refcount_inc(&cqspi->inflight_ops); 1490 1491 if (!refcount_read(&cqspi->refcount)) { 1492 if (refcount_read(&cqspi->inflight_ops)) 1493 refcount_dec(&cqspi->inflight_ops); 1494 return -EBUSY; 1495 } 1496 1497 if (!(ddata && (ddata->quirks & CQSPI_DISABLE_RUNTIME_PM))) { 1498 ret = pm_runtime_resume_and_get(dev); 1499 if (ret) { 1500 dev_err(&mem->spi->dev, "resume failed with %d\n", ret); 1501 goto dec_inflight_refcount; 1502 } 1503 } 1504 1505 ret = cqspi_mem_process(mem, op); 1506 1507 if (!(ddata && (ddata->quirks & CQSPI_DISABLE_RUNTIME_PM))) 1508 pm_runtime_put_autosuspend(dev); 1509 1510 if (ret) 1511 dev_err(&mem->spi->dev, "operation failed with %d\n", ret); 1512 1513 dec_inflight_refcount: 1514 if (refcount_read(&cqspi->inflight_ops) > 1) 1515 refcount_dec(&cqspi->inflight_ops); 1516 1517 return ret; 1518 } 1519 1520 static bool cqspi_supports_mem_op(struct spi_mem *mem, 1521 const struct spi_mem_op *op) 1522 { 1523 struct cqspi_st *cqspi = spi_controller_get_devdata(mem->spi->controller); 1524 bool all_true, all_false; 1525 1526 /* 1527 * op->dummy.dtr is required for converting nbytes into ncycles. 1528 * Also, don't check the dtr field of the op phase having zero nbytes. 1529 */ 1530 all_true = op->cmd.dtr && 1531 (!op->addr.nbytes || op->addr.dtr) && 1532 (!op->dummy.nbytes || op->dummy.dtr) && 1533 (!op->data.nbytes || op->data.dtr); 1534 1535 all_false = !op->cmd.dtr && !op->addr.dtr && !op->dummy.dtr && 1536 !op->data.dtr; 1537 1538 if (all_true) { 1539 /* Right now we only support 8-8-8 DTR mode. */ 1540 if (op->cmd.nbytes && op->cmd.buswidth != 8) 1541 return false; 1542 if (op->addr.nbytes && op->addr.buswidth != 8) 1543 return false; 1544 if (op->data.nbytes && op->data.buswidth != 8) 1545 return false; 1546 1547 /* A single opcode is supported, it will be repeated */ 1548 if ((op->cmd.opcode >> 8) != (op->cmd.opcode & 0xFF)) 1549 return false; 1550 1551 if (cqspi->is_rzn1) 1552 return false; 1553 } else if (!all_false) { 1554 /* Mixed DTR modes are not supported. */ 1555 return false; 1556 } 1557 1558 return spi_mem_default_supports_op(mem, op); 1559 } 1560 1561 static int cqspi_of_get_flash_pdata(struct platform_device *pdev, 1562 struct cqspi_flash_pdata *f_pdata, 1563 struct device_node *np) 1564 { 1565 if (of_property_read_u32(np, "cdns,read-delay", &f_pdata->read_delay)) { 1566 dev_err(&pdev->dev, "couldn't determine read-delay\n"); 1567 return -ENXIO; 1568 } 1569 1570 if (of_property_read_u32(np, "cdns,tshsl-ns", &f_pdata->tshsl_ns)) { 1571 dev_err(&pdev->dev, "couldn't determine tshsl-ns\n"); 1572 return -ENXIO; 1573 } 1574 1575 if (of_property_read_u32(np, "cdns,tsd2d-ns", &f_pdata->tsd2d_ns)) { 1576 dev_err(&pdev->dev, "couldn't determine tsd2d-ns\n"); 1577 return -ENXIO; 1578 } 1579 1580 if (of_property_read_u32(np, "cdns,tchsh-ns", &f_pdata->tchsh_ns)) { 1581 dev_err(&pdev->dev, "couldn't determine tchsh-ns\n"); 1582 return -ENXIO; 1583 } 1584 1585 if (of_property_read_u32(np, "cdns,tslch-ns", &f_pdata->tslch_ns)) { 1586 dev_err(&pdev->dev, "couldn't determine tslch-ns\n"); 1587 return -ENXIO; 1588 } 1589 1590 if (of_property_read_u32(np, "spi-max-frequency", &f_pdata->clk_rate)) { 1591 dev_err(&pdev->dev, "couldn't determine spi-max-frequency\n"); 1592 return -ENXIO; 1593 } 1594 1595 return 0; 1596 } 1597 1598 static int cqspi_of_get_pdata(struct cqspi_st *cqspi) 1599 { 1600 struct device *dev = &cqspi->pdev->dev; 1601 struct device_node *np = dev->of_node; 1602 u32 id[2]; 1603 1604 cqspi->is_decoded_cs = of_property_read_bool(np, "cdns,is-decoded-cs"); 1605 1606 if (!(cqspi->ddata && cqspi->ddata->quirks & CQSPI_NO_INDIRECT_MODE)) { 1607 if (of_property_read_u32(np, "cdns,fifo-depth", &cqspi->fifo_depth)) { 1608 /* Zero signals FIFO depth should be runtime detected. */ 1609 cqspi->fifo_depth = 0; 1610 } 1611 1612 if (of_property_read_u32(np, "cdns,fifo-width", &cqspi->fifo_width)) 1613 cqspi->fifo_width = 4; 1614 1615 if (of_property_read_u32(np, "cdns,trigger-address", 1616 &cqspi->trigger_address)) { 1617 dev_err(dev, "couldn't determine trigger-address\n"); 1618 return -ENXIO; 1619 } 1620 } 1621 1622 if (of_property_read_u32(np, "num-cs", &cqspi->num_chipselect)) 1623 cqspi->num_chipselect = CQSPI_MAX_CHIPSELECT; 1624 1625 cqspi->rclk_en = of_property_read_bool(np, "cdns,rclk-en"); 1626 1627 if (!of_property_read_u32_array(np, "power-domains", id, 1628 ARRAY_SIZE(id))) 1629 cqspi->pd_dev_id = id[1]; 1630 1631 return 0; 1632 } 1633 1634 static void cqspi_controller_init(struct cqspi_st *cqspi) 1635 { 1636 u32 reg; 1637 1638 /* Configure the remap address register, no remap */ 1639 writel(0, cqspi->iobase + CQSPI_REG_REMAP); 1640 1641 /* Disable all interrupts. */ 1642 writel(0, cqspi->iobase + CQSPI_REG_IRQMASK); 1643 1644 if (!(cqspi->ddata && cqspi->ddata->quirks & CQSPI_NO_INDIRECT_MODE)) { 1645 /* Configure the SRAM split to 1:1 . */ 1646 writel(cqspi->fifo_depth / 2, cqspi->iobase + CQSPI_REG_SRAMPARTITION); 1647 /* Load indirect trigger address. */ 1648 writel(cqspi->trigger_address, 1649 cqspi->iobase + CQSPI_REG_INDIRECTTRIGGER); 1650 1651 /* Program read watermark -- 1/2 of the FIFO. */ 1652 writel(cqspi->fifo_depth * cqspi->fifo_width / 2, 1653 cqspi->iobase + CQSPI_REG_INDIRECTRDWATERMARK); 1654 /* Program write watermark -- 1/8 of the FIFO. */ 1655 writel(cqspi->fifo_depth * cqspi->fifo_width / 8, 1656 cqspi->iobase + CQSPI_REG_INDIRECTWRWATERMARK); 1657 } 1658 1659 /* Disable write protection at controller level */ 1660 if (cqspi->ddata && cqspi->ddata->quirks & CQSPI_HAS_WR_PROTECT) 1661 writel(0, cqspi->iobase + CQSPI_REG_WR_PROT_CTRL); 1662 1663 /* Disable direct access controller */ 1664 if (!cqspi->use_direct_mode) { 1665 reg = readl(cqspi->iobase + CQSPI_REG_CONFIG); 1666 reg &= ~CQSPI_REG_CONFIG_ENB_DIR_ACC_CTRL; 1667 writel(reg, cqspi->iobase + CQSPI_REG_CONFIG); 1668 } 1669 1670 /* Enable DMA interface */ 1671 if (cqspi->use_dma_read) { 1672 reg = readl(cqspi->iobase + CQSPI_REG_CONFIG); 1673 reg |= CQSPI_REG_CONFIG_DMA_MASK; 1674 writel(reg, cqspi->iobase + CQSPI_REG_CONFIG); 1675 } 1676 } 1677 1678 static void cqspi_controller_detect_fifo_depth(struct cqspi_st *cqspi) 1679 { 1680 struct device *dev = &cqspi->pdev->dev; 1681 u32 reg, fifo_depth; 1682 1683 if (cqspi->ddata && cqspi->ddata->quirks & CQSPI_NO_INDIRECT_MODE) 1684 return; 1685 1686 /* 1687 * Bits N-1:0 are writable while bits 31:N are read as zero, with 2^N 1688 * the FIFO depth. 1689 */ 1690 writel(U32_MAX, cqspi->iobase + CQSPI_REG_SRAMPARTITION); 1691 reg = readl(cqspi->iobase + CQSPI_REG_SRAMPARTITION); 1692 fifo_depth = reg + 1; 1693 1694 /* FIFO depth of zero means no value from devicetree was provided. */ 1695 if (cqspi->fifo_depth == 0) { 1696 cqspi->fifo_depth = fifo_depth; 1697 dev_dbg(dev, "using FIFO depth of %u\n", fifo_depth); 1698 } else if (fifo_depth != cqspi->fifo_depth) { 1699 dev_warn(dev, "detected FIFO depth (%u) different from config (%u)\n", 1700 fifo_depth, cqspi->fifo_depth); 1701 } 1702 } 1703 1704 static int cqspi_request_mmap_dma(struct cqspi_st *cqspi) 1705 { 1706 dma_cap_mask_t mask; 1707 1708 dma_cap_zero(mask); 1709 dma_cap_set(DMA_MEMCPY, mask); 1710 1711 cqspi->rx_chan = dma_request_chan_by_mask(&mask); 1712 if (IS_ERR(cqspi->rx_chan)) { 1713 int ret = PTR_ERR(cqspi->rx_chan); 1714 1715 cqspi->rx_chan = NULL; 1716 if (ret == -ENODEV) { 1717 /* DMA support is not mandatory */ 1718 dev_info(&cqspi->pdev->dev, "No Rx DMA available\n"); 1719 return 0; 1720 } 1721 1722 return dev_err_probe(&cqspi->pdev->dev, ret, "No Rx DMA available\n"); 1723 } 1724 init_completion(&cqspi->rx_dma_complete); 1725 1726 return 0; 1727 } 1728 1729 static const char *cqspi_get_name(struct spi_mem *mem) 1730 { 1731 struct cqspi_st *cqspi = spi_controller_get_devdata(mem->spi->controller); 1732 struct device *dev = &cqspi->pdev->dev; 1733 1734 return devm_kasprintf(dev, GFP_KERNEL, "%s.%d", dev_name(dev), 1735 spi_get_chipselect(mem->spi, 0)); 1736 } 1737 1738 static const struct spi_controller_mem_ops cqspi_mem_ops = { 1739 .exec_op = cqspi_exec_mem_op, 1740 .get_name = cqspi_get_name, 1741 .supports_op = cqspi_supports_mem_op, 1742 }; 1743 1744 static const struct spi_controller_mem_caps cqspi_mem_caps = { 1745 .dtr = true, 1746 .per_op_freq = true, 1747 }; 1748 1749 static int cqspi_setup_flash(struct cqspi_st *cqspi) 1750 { 1751 struct platform_device *pdev = cqspi->pdev; 1752 struct device *dev = &pdev->dev; 1753 struct cqspi_flash_pdata *f_pdata; 1754 int ret, cs, max_cs = -1; 1755 1756 /* Get flash device data */ 1757 for_each_available_child_of_node_scoped(dev->of_node, np) { 1758 ret = of_property_read_u32(np, "reg", &cs); 1759 if (ret) { 1760 dev_err(dev, "Couldn't determine chip select.\n"); 1761 return ret; 1762 } 1763 1764 if (cs >= cqspi->num_chipselect) { 1765 dev_err(dev, "Chip select %d out of range.\n", cs); 1766 return -EINVAL; 1767 } 1768 1769 max_cs = max_t(int, cs, max_cs); 1770 1771 f_pdata = &cqspi->f_pdata[cs]; 1772 f_pdata->cqspi = cqspi; 1773 f_pdata->cs = cs; 1774 1775 ret = cqspi_of_get_flash_pdata(pdev, f_pdata, np); 1776 if (ret) 1777 return ret; 1778 } 1779 1780 if (max_cs < 0) { 1781 dev_err(dev, "No flash device declared\n"); 1782 return -ENODEV; 1783 } 1784 1785 cqspi->num_chipselect = max_cs + 1; 1786 return 0; 1787 } 1788 1789 static int cqspi_probe(struct platform_device *pdev) 1790 { 1791 const struct cqspi_driver_platdata *ddata; 1792 struct reset_control *rstc, *rstc_ocp, *rstc_ref; 1793 struct device *dev = &pdev->dev; 1794 struct spi_controller *host; 1795 struct resource *res_ahb; 1796 struct cqspi_st *cqspi; 1797 int ret, irq; 1798 1799 host = devm_spi_alloc_host(&pdev->dev, sizeof(*cqspi)); 1800 if (!host) 1801 return -ENOMEM; 1802 1803 host->mode_bits = SPI_RX_QUAD | SPI_RX_DUAL; 1804 host->mem_ops = &cqspi_mem_ops; 1805 host->mem_caps = &cqspi_mem_caps; 1806 1807 cqspi = spi_controller_get_devdata(host); 1808 if (of_device_is_compatible(pdev->dev.of_node, "starfive,jh7110-qspi")) 1809 cqspi->is_jh7110 = true; 1810 if (of_device_is_compatible(pdev->dev.of_node, "renesas,rzn1-qspi")) 1811 cqspi->is_rzn1 = true; 1812 1813 cqspi->pdev = pdev; 1814 cqspi->host = host; 1815 cqspi->ddata = ddata = of_device_get_match_data(dev); 1816 platform_set_drvdata(pdev, cqspi); 1817 1818 /* Obtain configuration from OF. */ 1819 ret = cqspi_of_get_pdata(cqspi); 1820 if (ret) { 1821 dev_err(dev, "Cannot get mandatory OF data.\n"); 1822 return -ENODEV; 1823 } 1824 1825 ret = cqspi_setup_flash(cqspi); 1826 if (ret) { 1827 dev_err(dev, "failed to setup flash parameters %d\n", ret); 1828 return ret; 1829 } 1830 1831 /* Obtain QSPI clocks. */ 1832 memcpy(&cqspi->clks, &cqspi_clks, sizeof(cqspi->clks)); 1833 ret = devm_clk_bulk_get_optional(dev, CLK_QSPI_NUM, cqspi->clks); 1834 if (ret) 1835 return dev_err_probe(dev, ret, "Failed to get clocks\n"); 1836 1837 if (!cqspi->clks[CLK_QSPI_REF].clk) { 1838 dev_err(dev, "Cannot claim mandatory QSPI ref clock.\n"); 1839 return -ENODEV; 1840 } 1841 1842 /* Obtain and remap controller address. */ 1843 cqspi->iobase = devm_platform_ioremap_resource(pdev, 0); 1844 if (IS_ERR(cqspi->iobase)) { 1845 dev_err(dev, "Cannot remap controller address.\n"); 1846 ret = PTR_ERR(cqspi->iobase); 1847 return ret; 1848 } 1849 1850 /* Obtain and remap AHB address. */ 1851 cqspi->ahb_base = devm_platform_get_and_ioremap_resource(pdev, 1, &res_ahb); 1852 if (IS_ERR(cqspi->ahb_base)) { 1853 dev_err(dev, "Cannot remap AHB address.\n"); 1854 ret = PTR_ERR(cqspi->ahb_base); 1855 return ret; 1856 } 1857 cqspi->mmap_phys_base = (dma_addr_t)res_ahb->start; 1858 cqspi->ahb_size = resource_size(res_ahb); 1859 1860 init_completion(&cqspi->transfer_complete); 1861 1862 /* Obtain IRQ line. */ 1863 irq = platform_get_irq(pdev, 0); 1864 if (irq < 0) 1865 return -ENXIO; 1866 1867 ret = pm_runtime_set_active(dev); 1868 if (ret) 1869 return ret; 1870 1871 ret = clk_bulk_prepare_enable(CLK_QSPI_NUM, cqspi->clks); 1872 if (ret) { 1873 dev_err(dev, "Cannot enable QSPI clocks.\n"); 1874 goto disable_rpm; 1875 } 1876 1877 /* Obtain QSPI reset control */ 1878 rstc = devm_reset_control_get_optional_exclusive(dev, "qspi"); 1879 if (IS_ERR(rstc)) { 1880 ret = PTR_ERR(rstc); 1881 dev_err(dev, "Cannot get QSPI reset.\n"); 1882 goto disable_clks; 1883 } 1884 1885 rstc_ocp = devm_reset_control_get_optional_exclusive(dev, "qspi-ocp"); 1886 if (IS_ERR(rstc_ocp)) { 1887 ret = PTR_ERR(rstc_ocp); 1888 dev_err(dev, "Cannot get QSPI OCP reset.\n"); 1889 goto disable_clks; 1890 } 1891 1892 if (cqspi->is_jh7110) { 1893 rstc_ref = devm_reset_control_get_optional_exclusive(dev, "rstc_ref"); 1894 if (IS_ERR(rstc_ref)) { 1895 ret = PTR_ERR(rstc_ref); 1896 dev_err(dev, "Cannot get QSPI REF reset.\n"); 1897 goto disable_clks; 1898 } 1899 reset_control_assert(rstc_ref); 1900 reset_control_deassert(rstc_ref); 1901 } 1902 1903 reset_control_assert(rstc); 1904 reset_control_deassert(rstc); 1905 1906 reset_control_assert(rstc_ocp); 1907 reset_control_deassert(rstc_ocp); 1908 1909 cqspi->master_ref_clk_hz = clk_get_rate(cqspi->clks[CLK_QSPI_REF].clk); 1910 if (!cqspi->is_rzn1) { 1911 host->max_speed_hz = cqspi->master_ref_clk_hz; 1912 } else { 1913 host->max_speed_hz = cqspi->master_ref_clk_hz / 2; 1914 host->min_speed_hz = cqspi->master_ref_clk_hz / 32; 1915 } 1916 1917 /* write completion is supported by default */ 1918 cqspi->wr_completion = true; 1919 1920 if (ddata) { 1921 if (ddata->quirks & CQSPI_NEEDS_WR_DELAY) 1922 cqspi->wr_delay = 50 * DIV_ROUND_UP(NSEC_PER_SEC, 1923 cqspi->master_ref_clk_hz); 1924 if (ddata->hwcaps_mask & CQSPI_SUPPORTS_OCTAL) 1925 host->mode_bits |= SPI_RX_OCTAL | SPI_TX_OCTAL; 1926 if (ddata->hwcaps_mask & CQSPI_SUPPORTS_QUAD) 1927 host->mode_bits |= SPI_TX_QUAD; 1928 if (!(ddata->quirks & CQSPI_DISABLE_DAC_MODE)) { 1929 cqspi->use_direct_mode = true; 1930 cqspi->use_direct_mode_wr = true; 1931 } 1932 if (ddata->quirks & CQSPI_SUPPORT_EXTERNAL_DMA) 1933 cqspi->use_dma_read = true; 1934 if (ddata->quirks & CQSPI_NO_SUPPORT_WR_COMPLETION) 1935 cqspi->wr_completion = false; 1936 if (ddata->quirks & CQSPI_SLOW_SRAM) 1937 cqspi->slow_sram = true; 1938 if (ddata->quirks & CQSPI_NEEDS_APB_AHB_HAZARD_WAR) 1939 cqspi->apb_ahb_hazard = true; 1940 if (ddata->quirks & CQSPI_DISABLE_STIG_MODE) 1941 cqspi->disable_stig_mode = true; 1942 1943 if (ddata->quirks & CQSPI_DMA_SET_MASK) { 1944 ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); 1945 if (ret) 1946 goto disable_clks; 1947 } 1948 } 1949 1950 refcount_set(&cqspi->refcount, 1); 1951 refcount_set(&cqspi->inflight_ops, 1); 1952 1953 ret = devm_request_irq(dev, irq, cqspi_irq_handler, 0, 1954 pdev->name, cqspi); 1955 if (ret) { 1956 dev_err(dev, "Cannot request IRQ.\n"); 1957 goto disable_clks; 1958 } 1959 1960 cqspi_wait_idle(cqspi); 1961 cqspi_controller_enable(cqspi, 0); 1962 cqspi_controller_detect_fifo_depth(cqspi); 1963 cqspi_controller_init(cqspi); 1964 cqspi_controller_enable(cqspi, 1); 1965 cqspi->current_cs = -1; 1966 cqspi->sclk = 0; 1967 1968 if (!(ddata && (ddata->quirks & CQSPI_DISABLE_RUNTIME_PM))) { 1969 pm_runtime_enable(dev); 1970 pm_runtime_set_autosuspend_delay(dev, CQSPI_AUTOSUSPEND_TIMEOUT); 1971 pm_runtime_use_autosuspend(dev); 1972 pm_runtime_get_noresume(dev); 1973 } 1974 1975 host->num_chipselect = cqspi->num_chipselect; 1976 1977 if (ddata && (ddata->quirks & CQSPI_SUPPORT_DEVICE_RESET)) 1978 cqspi_device_reset(cqspi); 1979 1980 if (cqspi->use_direct_mode && !cqspi->is_rzn1) { 1981 ret = cqspi_request_mmap_dma(cqspi); 1982 if (ret == -EPROBE_DEFER) { 1983 dev_err_probe(&pdev->dev, ret, "Failed to request mmap DMA\n"); 1984 goto disable_controller; 1985 } 1986 } 1987 1988 ret = spi_register_controller(host); 1989 if (ret) { 1990 dev_err(&pdev->dev, "failed to register SPI ctlr %d\n", ret); 1991 goto release_dma_chan; 1992 } 1993 1994 if (!(ddata && (ddata->quirks & CQSPI_DISABLE_RUNTIME_PM))) 1995 pm_runtime_put_autosuspend(dev); 1996 1997 return 0; 1998 1999 release_dma_chan: 2000 if (cqspi->rx_chan) 2001 dma_release_channel(cqspi->rx_chan); 2002 disable_controller: 2003 cqspi_controller_enable(cqspi, 0); 2004 disable_clks: 2005 if (pm_runtime_get_sync(&pdev->dev) >= 0) 2006 clk_bulk_disable_unprepare(CLK_QSPI_NUM, cqspi->clks); 2007 disable_rpm: 2008 if (!(ddata && (ddata->quirks & CQSPI_DISABLE_RUNTIME_PM))) 2009 pm_runtime_disable(dev); 2010 2011 return ret; 2012 } 2013 2014 static void cqspi_remove(struct platform_device *pdev) 2015 { 2016 const struct cqspi_driver_platdata *ddata; 2017 struct cqspi_st *cqspi = platform_get_drvdata(pdev); 2018 struct device *dev = &pdev->dev; 2019 int ret = 0; 2020 2021 ddata = of_device_get_match_data(dev); 2022 2023 refcount_set(&cqspi->refcount, 0); 2024 2025 if (!refcount_dec_and_test(&cqspi->inflight_ops)) 2026 cqspi_wait_idle(cqspi); 2027 2028 spi_unregister_controller(cqspi->host); 2029 2030 if (cqspi->rx_chan) 2031 dma_release_channel(cqspi->rx_chan); 2032 2033 cqspi_controller_enable(cqspi, 0); 2034 2035 2036 if (!(ddata && (ddata->quirks & CQSPI_DISABLE_RUNTIME_PM))) 2037 ret = pm_runtime_get_sync(&pdev->dev); 2038 2039 if (ret >= 0) 2040 clk_bulk_disable_unprepare(CLK_QSPI_NUM, cqspi->clks); 2041 2042 if (!(ddata && (ddata->quirks & CQSPI_DISABLE_RUNTIME_PM))) { 2043 pm_runtime_put_sync(&pdev->dev); 2044 pm_runtime_disable(&pdev->dev); 2045 } 2046 } 2047 2048 static int cqspi_runtime_suspend(struct device *dev) 2049 { 2050 struct cqspi_st *cqspi = dev_get_drvdata(dev); 2051 2052 cqspi_controller_enable(cqspi, 0); 2053 clk_bulk_disable_unprepare(CLK_QSPI_NUM, cqspi->clks); 2054 return 0; 2055 } 2056 2057 static int cqspi_runtime_resume(struct device *dev) 2058 { 2059 struct cqspi_st *cqspi = dev_get_drvdata(dev); 2060 int ret; 2061 2062 ret = clk_bulk_prepare_enable(CLK_QSPI_NUM, cqspi->clks); 2063 if (ret) 2064 return ret; 2065 2066 cqspi_wait_idle(cqspi); 2067 cqspi_controller_enable(cqspi, 0); 2068 cqspi_controller_init(cqspi); 2069 cqspi_controller_enable(cqspi, 1); 2070 2071 cqspi->current_cs = -1; 2072 cqspi->sclk = 0; 2073 return 0; 2074 } 2075 2076 static int cqspi_suspend(struct device *dev) 2077 { 2078 struct cqspi_st *cqspi = dev_get_drvdata(dev); 2079 int ret; 2080 2081 ret = spi_controller_suspend(cqspi->host); 2082 if (ret) 2083 return ret; 2084 2085 return pm_runtime_force_suspend(dev); 2086 } 2087 2088 static int cqspi_resume(struct device *dev) 2089 { 2090 struct cqspi_st *cqspi = dev_get_drvdata(dev); 2091 int ret; 2092 2093 ret = pm_runtime_force_resume(dev); 2094 if (ret) { 2095 dev_err(dev, "pm_runtime_force_resume failed on resume\n"); 2096 return ret; 2097 } 2098 2099 return spi_controller_resume(cqspi->host); 2100 } 2101 2102 static const struct dev_pm_ops cqspi_dev_pm_ops = { 2103 RUNTIME_PM_OPS(cqspi_runtime_suspend, cqspi_runtime_resume, NULL) 2104 SYSTEM_SLEEP_PM_OPS(cqspi_suspend, cqspi_resume) 2105 }; 2106 2107 static const struct cqspi_driver_platdata cdns_qspi = { 2108 .quirks = CQSPI_DISABLE_DAC_MODE, 2109 }; 2110 2111 static const struct cqspi_driver_platdata k2g_qspi = { 2112 .quirks = CQSPI_NEEDS_WR_DELAY, 2113 }; 2114 2115 static const struct cqspi_driver_platdata am654_ospi = { 2116 .hwcaps_mask = CQSPI_SUPPORTS_OCTAL | CQSPI_SUPPORTS_QUAD, 2117 .quirks = CQSPI_NEEDS_WR_DELAY, 2118 }; 2119 2120 static const struct cqspi_driver_platdata intel_lgm_qspi = { 2121 .quirks = CQSPI_DISABLE_DAC_MODE, 2122 }; 2123 2124 static const struct cqspi_driver_platdata socfpga_qspi = { 2125 .quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_NO_SUPPORT_WR_COMPLETION | 2126 CQSPI_SLOW_SRAM | CQSPI_DISABLE_STIG_MODE | 2127 CQSPI_DISABLE_RUNTIME_PM, 2128 }; 2129 2130 static const struct cqspi_driver_platdata versal_ospi = { 2131 .hwcaps_mask = CQSPI_SUPPORTS_OCTAL, 2132 .quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_SUPPORT_EXTERNAL_DMA | 2133 CQSPI_DMA_SET_MASK, 2134 .indirect_read_dma = cqspi_versal_indirect_read_dma, 2135 .get_dma_status = cqspi_get_versal_dma_status, 2136 }; 2137 2138 static const struct cqspi_driver_platdata versal2_ospi = { 2139 .hwcaps_mask = CQSPI_SUPPORTS_OCTAL, 2140 .quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_SUPPORT_EXTERNAL_DMA | 2141 CQSPI_DMA_SET_MASK | CQSPI_SUPPORT_DEVICE_RESET, 2142 .indirect_read_dma = cqspi_versal_indirect_read_dma, 2143 .get_dma_status = cqspi_get_versal_dma_status, 2144 }; 2145 2146 static const struct cqspi_driver_platdata jh7110_qspi = { 2147 .quirks = CQSPI_DISABLE_DAC_MODE, 2148 }; 2149 2150 static const struct cqspi_driver_platdata pensando_cdns_qspi = { 2151 .quirks = CQSPI_NEEDS_APB_AHB_HAZARD_WAR | CQSPI_DISABLE_DAC_MODE, 2152 }; 2153 2154 static const struct cqspi_driver_platdata mobileye_eyeq5_ospi = { 2155 .hwcaps_mask = CQSPI_SUPPORTS_OCTAL, 2156 .quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_NO_SUPPORT_WR_COMPLETION | 2157 CQSPI_RD_NO_IRQ, 2158 }; 2159 2160 static const struct cqspi_driver_platdata renesas_rzn1_qspi = { 2161 .hwcaps_mask = CQSPI_SUPPORTS_QUAD, 2162 .quirks = CQSPI_NO_SUPPORT_WR_COMPLETION | CQSPI_RD_NO_IRQ | 2163 CQSPI_HAS_WR_PROTECT | CQSPI_NO_INDIRECT_MODE, 2164 }; 2165 2166 static const struct of_device_id cqspi_dt_ids[] = { 2167 { 2168 .compatible = "cdns,qspi-nor", 2169 .data = &cdns_qspi, 2170 }, 2171 { 2172 .compatible = "ti,k2g-qspi", 2173 .data = &k2g_qspi, 2174 }, 2175 { 2176 .compatible = "ti,am654-ospi", 2177 .data = &am654_ospi, 2178 }, 2179 { 2180 .compatible = "intel,lgm-qspi", 2181 .data = &intel_lgm_qspi, 2182 }, 2183 { 2184 .compatible = "xlnx,versal-ospi-1.0", 2185 .data = &versal_ospi, 2186 }, 2187 { 2188 .compatible = "intel,socfpga-qspi", 2189 .data = &socfpga_qspi, 2190 }, 2191 { 2192 .compatible = "starfive,jh7110-qspi", 2193 .data = &jh7110_qspi, 2194 }, 2195 { 2196 .compatible = "amd,pensando-elba-qspi", 2197 .data = &pensando_cdns_qspi, 2198 }, 2199 { 2200 .compatible = "mobileye,eyeq5-ospi", 2201 .data = &mobileye_eyeq5_ospi, 2202 }, 2203 { 2204 .compatible = "amd,versal2-ospi", 2205 .data = &versal2_ospi, 2206 }, 2207 { 2208 .compatible = "renesas,rzn1-qspi", 2209 .data = &renesas_rzn1_qspi, 2210 }, 2211 { /* end of table */ } 2212 }; 2213 2214 MODULE_DEVICE_TABLE(of, cqspi_dt_ids); 2215 2216 static struct platform_driver cqspi_platform_driver = { 2217 .probe = cqspi_probe, 2218 .remove = cqspi_remove, 2219 .driver = { 2220 .name = CQSPI_NAME, 2221 .pm = pm_ptr(&cqspi_dev_pm_ops), 2222 .of_match_table = cqspi_dt_ids, 2223 }, 2224 }; 2225 2226 module_platform_driver(cqspi_platform_driver); 2227 2228 MODULE_DESCRIPTION("Cadence QSPI Controller Driver"); 2229 MODULE_LICENSE("GPL v2"); 2230 MODULE_ALIAS("platform:" CQSPI_NAME); 2231 MODULE_AUTHOR("Ley Foon Tan <lftan@altera.com>"); 2232 MODULE_AUTHOR("Graham Moore <grmoore@opensource.altera.com>"); 2233 MODULE_AUTHOR("Vadivel Murugan R <vadivel.muruganx.ramuthevar@intel.com>"); 2234 MODULE_AUTHOR("Vignesh Raghavendra <vigneshr@ti.com>"); 2235 MODULE_AUTHOR("Pratyush Yadav <p.yadav@ti.com>"); 2236