// SPDX-License-Identifier: GPL-2.0-only
//
// Driver for Cadence QSPI Controller
//
// Copyright Altera Corporation (C) 2012-2014. All rights reserved.
// Copyright Intel Corporation (C) 2019-2020. All rights reserved.
// Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/firmware/xlnx-zynqmp.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/sched.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/timer.h>

#define CQSPI_NAME			"cadence-qspi"
#define CQSPI_MAX_CHIPSELECT		4

static_assert(CQSPI_MAX_CHIPSELECT <= SPI_CS_CNT_MAX);

/* Quirks */
#define CQSPI_NEEDS_WR_DELAY		BIT(0)
#define CQSPI_DISABLE_DAC_MODE		BIT(1)
#define CQSPI_SUPPORT_EXTERNAL_DMA	BIT(2)
#define CQSPI_NO_SUPPORT_WR_COMPLETION	BIT(3)
#define CQSPI_SLOW_SRAM			BIT(4)
#define CQSPI_NEEDS_APB_AHB_HAZARD_WAR	BIT(5)
#define CQSPI_RD_NO_IRQ			BIT(6)
#define CQSPI_DMA_SET_MASK		BIT(7)
#define CQSPI_SUPPORT_DEVICE_RESET	BIT(8)
#define CQSPI_DISABLE_STIG_MODE		BIT(9)

/* Capabilities */
#define CQSPI_SUPPORTS_OCTAL		BIT(0)
#define CQSPI_SUPPORTS_QUAD		BIT(1)

#define CQSPI_OP_WIDTH(part) ((part).nbytes ? ilog2((part).buswidth) : 0)

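/*
 * Added commentary (illustrative example, not part of the original source):
 * CQSPI_OP_WIDTH() maps a spi-mem op phase to the 2-bit transfer-type
 * encoding used by the RD/WR instruction registers via ilog2():
 * buswidth 1 -> 0, 2 -> 1, 4 -> 2, 8 -> 3, and 0 for an absent phase
 * (nbytes == 0).
 */
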
enum {
	CLK_QSPI_APB = 0,
	CLK_QSPI_AHB,
	CLK_QSPI_NUM,
};

struct cqspi_st;

struct cqspi_flash_pdata {
	struct cqspi_st	*cqspi;
	u32		clk_rate;
	u32		read_delay;
	u32		tshsl_ns;
	u32		tsd2d_ns;
	u32		tchsh_ns;
	u32		tslch_ns;
	u8		cs;
};

struct cqspi_st {
	struct platform_device	*pdev;
	struct spi_controller	*host;
	struct clk		*clk;
	struct clk		*clks[CLK_QSPI_NUM];
	unsigned int		sclk;

	void __iomem		*iobase;
	void __iomem		*ahb_base;
	resource_size_t		ahb_size;
	struct completion	transfer_complete;

	struct dma_chan		*rx_chan;
	struct completion	rx_dma_complete;
	dma_addr_t		mmap_phys_base;

	int			current_cs;
	unsigned long		master_ref_clk_hz;
	bool			is_decoded_cs;
	u32			fifo_depth;
	u32			fifo_width;
	u32			num_chipselect;
	bool			rclk_en;
	u32			trigger_address;
	u32			wr_delay;
	bool			use_direct_mode;
	bool			use_direct_mode_wr;
	struct cqspi_flash_pdata f_pdata[CQSPI_MAX_CHIPSELECT];
	bool			use_dma_read;
	u32			pd_dev_id;
	bool			wr_completion;
	bool			slow_sram;
	bool			apb_ahb_hazard;

	bool			is_jh7110; /* Flag for StarFive JH7110 SoC */
	bool			disable_stig_mode;
	refcount_t		refcount;
	refcount_t		inflight_ops;

	const struct cqspi_driver_platdata *ddata;
};

struct cqspi_driver_platdata {
	u32 hwcaps_mask;
	u16 quirks;
	int (*indirect_read_dma)(struct cqspi_flash_pdata *f_pdata,
				 u_char *rxbuf, loff_t from_addr, size_t n_rx);
	u32 (*get_dma_status)(struct cqspi_st *cqspi);
	int (*jh7110_clk_init)(struct platform_device *pdev,
			       struct cqspi_st *cqspi);
};

/* Operation timeout value */
#define CQSPI_TIMEOUT_MS		500
#define CQSPI_READ_TIMEOUT_MS		10
#define CQSPI_BUSYWAIT_TIMEOUT_US	500

/* Runtime PM autosuspend delay */
#define CQSPI_AUTOSUSPEND_TIMEOUT	2000

#define CQSPI_DUMMY_CLKS_PER_BYTE	8
#define CQSPI_DUMMY_BYTES_MAX		4
#define CQSPI_DUMMY_CLKS_MAX		31

#define CQSPI_STIG_DATA_LEN_MAX		8

/* Register map */
#define CQSPI_REG_CONFIG			0x00
#define CQSPI_REG_CONFIG_ENABLE_MASK		BIT(0)
#define CQSPI_REG_CONFIG_ENB_DIR_ACC_CTRL	BIT(7)
#define CQSPI_REG_CONFIG_DECODE_MASK		BIT(9)
#define CQSPI_REG_CONFIG_CHIPSELECT_LSB		10
#define CQSPI_REG_CONFIG_DMA_MASK		BIT(15)
#define CQSPI_REG_CONFIG_BAUD_LSB		19
#define CQSPI_REG_CONFIG_DTR_PROTO		BIT(24)
#define CQSPI_REG_CONFIG_DUAL_OPCODE		BIT(30)
#define CQSPI_REG_CONFIG_IDLE_LSB		31
#define CQSPI_REG_CONFIG_CHIPSELECT_MASK	0xF
#define CQSPI_REG_CONFIG_BAUD_MASK		0xF
#define CQSPI_REG_CONFIG_RESET_PIN_FLD_MASK	BIT(5)
#define CQSPI_REG_CONFIG_RESET_CFG_FLD_MASK	BIT(6)

#define CQSPI_REG_RD_INSTR			0x04
#define CQSPI_REG_RD_INSTR_OPCODE_LSB		0
#define CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB	8
#define CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB	12
#define CQSPI_REG_RD_INSTR_TYPE_DATA_LSB	16
#define CQSPI_REG_RD_INSTR_MODE_EN_LSB		20
#define CQSPI_REG_RD_INSTR_DUMMY_LSB		24
#define CQSPI_REG_RD_INSTR_TYPE_INSTR_MASK	0x3
#define CQSPI_REG_RD_INSTR_TYPE_ADDR_MASK	0x3
#define CQSPI_REG_RD_INSTR_TYPE_DATA_MASK	0x3
#define CQSPI_REG_RD_INSTR_DUMMY_MASK		0x1F

#define CQSPI_REG_WR_INSTR			0x08
#define CQSPI_REG_WR_INSTR_OPCODE_LSB		0
#define CQSPI_REG_WR_INSTR_TYPE_ADDR_LSB	12
#define CQSPI_REG_WR_INSTR_TYPE_DATA_LSB	16

#define CQSPI_REG_DELAY				0x0C
#define CQSPI_REG_DELAY_TSLCH_LSB		0
#define CQSPI_REG_DELAY_TCHSH_LSB		8
#define CQSPI_REG_DELAY_TSD2D_LSB		16
#define CQSPI_REG_DELAY_TSHSL_LSB		24
#define CQSPI_REG_DELAY_TSLCH_MASK		0xFF
#define CQSPI_REG_DELAY_TCHSH_MASK		0xFF
#define CQSPI_REG_DELAY_TSD2D_MASK		0xFF
#define CQSPI_REG_DELAY_TSHSL_MASK		0xFF

#define CQSPI_REG_READCAPTURE			0x10
#define CQSPI_REG_READCAPTURE_BYPASS_LSB	0
#define CQSPI_REG_READCAPTURE_DELAY_LSB		1
#define CQSPI_REG_READCAPTURE_DELAY_MASK	0xF

#define CQSPI_REG_SIZE				0x14
#define CQSPI_REG_SIZE_ADDRESS_LSB		0
#define CQSPI_REG_SIZE_PAGE_LSB			4
#define CQSPI_REG_SIZE_BLOCK_LSB		16
#define CQSPI_REG_SIZE_ADDRESS_MASK		0xF
#define CQSPI_REG_SIZE_PAGE_MASK		0xFFF
#define CQSPI_REG_SIZE_BLOCK_MASK		0x3F

#define CQSPI_REG_SRAMPARTITION			0x18
#define CQSPI_REG_INDIRECTTRIGGER		0x1C

#define CQSPI_REG_DMA				0x20
#define CQSPI_REG_DMA_SINGLE_LSB		0
#define CQSPI_REG_DMA_BURST_LSB			8
#define CQSPI_REG_DMA_SINGLE_MASK		0xFF
#define CQSPI_REG_DMA_BURST_MASK		0xFF

#define CQSPI_REG_REMAP				0x24
#define CQSPI_REG_MODE_BIT			0x28

#define CQSPI_REG_SDRAMLEVEL			0x2C
#define CQSPI_REG_SDRAMLEVEL_RD_LSB		0
#define CQSPI_REG_SDRAMLEVEL_WR_LSB		16
#define CQSPI_REG_SDRAMLEVEL_RD_MASK		0xFFFF
#define CQSPI_REG_SDRAMLEVEL_WR_MASK		0xFFFF

#define CQSPI_REG_WR_COMPLETION_CTRL		0x38
#define CQSPI_REG_WR_DISABLE_AUTO_POLL		BIT(14)

#define CQSPI_REG_IRQSTATUS			0x40
#define CQSPI_REG_IRQMASK			0x44

#define CQSPI_REG_INDIRECTRD			0x60
#define CQSPI_REG_INDIRECTRD_START_MASK		BIT(0)
#define CQSPI_REG_INDIRECTRD_CANCEL_MASK	BIT(1)
#define CQSPI_REG_INDIRECTRD_DONE_MASK		BIT(5)

#define CQSPI_REG_INDIRECTRDWATERMARK		0x64
#define CQSPI_REG_INDIRECTRDSTARTADDR		0x68
#define CQSPI_REG_INDIRECTRDBYTES		0x6C

#define CQSPI_REG_CMDCTRL			0x90
#define CQSPI_REG_CMDCTRL_EXECUTE_MASK		BIT(0)
#define CQSPI_REG_CMDCTRL_INPROGRESS_MASK	BIT(1)
#define CQSPI_REG_CMDCTRL_DUMMY_LSB		7
#define CQSPI_REG_CMDCTRL_WR_BYTES_LSB		12
#define CQSPI_REG_CMDCTRL_WR_EN_LSB		15
#define CQSPI_REG_CMDCTRL_ADD_BYTES_LSB		16
#define CQSPI_REG_CMDCTRL_ADDR_EN_LSB		19
#define CQSPI_REG_CMDCTRL_RD_BYTES_LSB		20
#define CQSPI_REG_CMDCTRL_RD_EN_LSB		23
#define CQSPI_REG_CMDCTRL_OPCODE_LSB		24
#define CQSPI_REG_CMDCTRL_WR_BYTES_MASK		0x7
#define CQSPI_REG_CMDCTRL_ADD_BYTES_MASK	0x3
#define CQSPI_REG_CMDCTRL_RD_BYTES_MASK		0x7
#define CQSPI_REG_CMDCTRL_DUMMY_MASK		0x1F

#define CQSPI_REG_INDIRECTWR			0x70
#define CQSPI_REG_INDIRECTWR_START_MASK		BIT(0)
#define CQSPI_REG_INDIRECTWR_CANCEL_MASK	BIT(1)
#define CQSPI_REG_INDIRECTWR_DONE_MASK		BIT(5)

#define CQSPI_REG_INDIRECTWRWATERMARK		0x74
#define CQSPI_REG_INDIRECTWRSTARTADDR		0x78
#define CQSPI_REG_INDIRECTWRBYTES		0x7C

#define CQSPI_REG_INDTRIG_ADDRRANGE		0x80

#define CQSPI_REG_CMDADDRESS			0x94
#define CQSPI_REG_CMDREADDATALOWER		0xA0
#define CQSPI_REG_CMDREADDATAUPPER		0xA4
#define CQSPI_REG_CMDWRITEDATALOWER		0xA8
#define CQSPI_REG_CMDWRITEDATAUPPER		0xAC

#define CQSPI_REG_POLLING_STATUS		0xB0
#define CQSPI_REG_POLLING_STATUS_DUMMY_LSB	16

#define CQSPI_REG_OP_EXT_LOWER			0xE0
#define CQSPI_REG_OP_EXT_READ_LSB		24
#define CQSPI_REG_OP_EXT_WRITE_LSB		16
#define CQSPI_REG_OP_EXT_STIG_LSB		0

#define CQSPI_REG_VERSAL_DMA_SRC_ADDR		0x1000

#define CQSPI_REG_VERSAL_DMA_DST_ADDR		0x1800
#define CQSPI_REG_VERSAL_DMA_DST_SIZE		0x1804

#define CQSPI_REG_VERSAL_DMA_DST_CTRL		0x180C

#define CQSPI_REG_VERSAL_DMA_DST_I_STS		0x1814
#define CQSPI_REG_VERSAL_DMA_DST_I_EN		0x1818
#define CQSPI_REG_VERSAL_DMA_DST_I_DIS		0x181C
#define CQSPI_REG_VERSAL_DMA_DST_DONE_MASK	BIT(1)

#define CQSPI_REG_VERSAL_DMA_DST_ADDR_MSB	0x1828

#define CQSPI_REG_VERSAL_DMA_DST_CTRL_VAL	0xF43FFA00
#define CQSPI_REG_VERSAL_ADDRRANGE_WIDTH_VAL	0x6

/* Interrupt status bits */
#define CQSPI_REG_IRQ_MODE_ERR			BIT(0)
#define CQSPI_REG_IRQ_UNDERFLOW			BIT(1)
#define CQSPI_REG_IRQ_IND_COMP			BIT(2)
#define CQSPI_REG_IRQ_IND_RD_REJECT		BIT(3)
#define CQSPI_REG_IRQ_WR_PROTECTED_ERR		BIT(4)
#define CQSPI_REG_IRQ_ILLEGAL_AHB_ERR		BIT(5)
#define CQSPI_REG_IRQ_WATERMARK			BIT(6)
#define CQSPI_REG_IRQ_IND_SRAM_FULL		BIT(12)

#define CQSPI_IRQ_MASK_RD		(CQSPI_REG_IRQ_WATERMARK	| \
					 CQSPI_REG_IRQ_IND_SRAM_FULL	| \
					 CQSPI_REG_IRQ_IND_COMP)

#define CQSPI_IRQ_MASK_WR		(CQSPI_REG_IRQ_IND_COMP		| \
					 CQSPI_REG_IRQ_WATERMARK	| \
					 CQSPI_REG_IRQ_UNDERFLOW)

#define CQSPI_IRQ_STATUS_MASK		0x1FFFF
#define CQSPI_DMA_UNALIGN		0x3

#define CQSPI_REG_VERSAL_DMA_VAL	0x602

static int cqspi_wait_for_bit(const struct cqspi_driver_platdata *ddata,
			      void __iomem *reg, const u32 mask, bool clr,
			      bool busywait)
{
	u64 timeout_us = CQSPI_TIMEOUT_MS * USEC_PER_MSEC;
	u32 val;

	if (busywait) {
		int ret = readl_relaxed_poll_timeout(reg, val,
						     (((clr ? ~val : val) & mask) == mask),
						     0, CQSPI_BUSYWAIT_TIMEOUT_US);

		if (ret != -ETIMEDOUT)
			return ret;

		timeout_us -= CQSPI_BUSYWAIT_TIMEOUT_US;
	}

	return readl_relaxed_poll_timeout(reg, val,
					  (((clr ? ~val : val) & mask) == mask),
					  10, timeout_us);
}

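/*
 * Added commentary: cqspi_wait_for_bit() uses a two-phase poll. When
 * busywait is set it first spins for up to CQSPI_BUSYWAIT_TIMEOUT_US so
 * short operations (e.g. STIG commands) complete without sleeping, then
 * falls back to a sleeping poll at a 10 us interval for the remainder of
 * the CQSPI_TIMEOUT_MS budget.
 */
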
static bool cqspi_is_idle(struct cqspi_st *cqspi)
{
	u32 reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);

	return reg & (1UL << CQSPI_REG_CONFIG_IDLE_LSB);
}

static u32 cqspi_get_rd_sram_level(struct cqspi_st *cqspi)
{
	u32 reg = readl(cqspi->iobase + CQSPI_REG_SDRAMLEVEL);

	reg >>= CQSPI_REG_SDRAMLEVEL_RD_LSB;
	return reg & CQSPI_REG_SDRAMLEVEL_RD_MASK;
}

static u32 cqspi_get_versal_dma_status(struct cqspi_st *cqspi)
{
	u32 dma_status;

	dma_status = readl(cqspi->iobase +
			   CQSPI_REG_VERSAL_DMA_DST_I_STS);
	writel(dma_status, cqspi->iobase +
	       CQSPI_REG_VERSAL_DMA_DST_I_STS);

	return dma_status & CQSPI_REG_VERSAL_DMA_DST_DONE_MASK;
}

static irqreturn_t cqspi_irq_handler(int this_irq, void *dev)
{
	struct cqspi_st *cqspi = dev;
	const struct cqspi_driver_platdata *ddata = cqspi->ddata;
	unsigned int irq_status;

	/* Read interrupt status */
	irq_status = readl(cqspi->iobase + CQSPI_REG_IRQSTATUS);

	/* Clear interrupt */
	writel(irq_status, cqspi->iobase + CQSPI_REG_IRQSTATUS);

	if (cqspi->use_dma_read && ddata && ddata->get_dma_status) {
		if (ddata->get_dma_status(cqspi)) {
			complete(&cqspi->transfer_complete);
			return IRQ_HANDLED;
		}
	} else if (!cqspi->slow_sram) {
		irq_status &= CQSPI_IRQ_MASK_RD | CQSPI_IRQ_MASK_WR;
	} else {
		irq_status &= CQSPI_REG_IRQ_WATERMARK | CQSPI_IRQ_MASK_WR;
	}

	if (irq_status)
		complete(&cqspi->transfer_complete);

	return IRQ_HANDLED;
}

static unsigned int cqspi_calc_rdreg(const struct spi_mem_op *op)
{
	u32 rdreg = 0;

	rdreg |= CQSPI_OP_WIDTH(op->cmd) << CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB;
	rdreg |= CQSPI_OP_WIDTH(op->addr) << CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB;
	rdreg |= CQSPI_OP_WIDTH(op->data) << CQSPI_REG_RD_INSTR_TYPE_DATA_LSB;

	return rdreg;
}

static unsigned int cqspi_calc_dummy(const struct spi_mem_op *op)
{
	unsigned int dummy_clk;

	if (!op->dummy.nbytes)
		return 0;

	dummy_clk = op->dummy.nbytes * (8 / op->dummy.buswidth);
	if (op->cmd.dtr)
		dummy_clk /= 2;

	return dummy_clk;
}

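/*
 * Added commentary (worked example): a classic fast-read with
 * op->dummy.nbytes = 1 and op->dummy.buswidth = 1 yields 1 * (8 / 1) = 8
 * dummy clocks; in 8D-8D-8D mode, 4 dummy bytes on an 8-bit bus give
 * 4 * (8 / 8) = 4, halved to 2 clocks because DTR transfers two bits per
 * cycle per line.
 */
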
static int cqspi_wait_idle(struct cqspi_st *cqspi)
{
	const unsigned int poll_idle_retry = 3;
	unsigned int count = 0;
	unsigned long timeout;

	timeout = jiffies + msecs_to_jiffies(CQSPI_TIMEOUT_MS);
	while (1) {
		/*
		 * Read a few times in succession to ensure the controller
		 * is indeed idle, that is, the bit does not transition
		 * low again.
		 */
		if (cqspi_is_idle(cqspi))
			count++;
		else
			count = 0;

		if (count >= poll_idle_retry)
			return 0;

		if (time_after(jiffies, timeout)) {
			/* Timeout, still in busy mode. */
			dev_err(&cqspi->pdev->dev,
				"QSPI is still busy after %dms timeout.\n",
				CQSPI_TIMEOUT_MS);
			return -ETIMEDOUT;
		}

		cpu_relax();
	}
}

static int cqspi_exec_flash_cmd(struct cqspi_st *cqspi, unsigned int reg)
{
	void __iomem *reg_base = cqspi->iobase;
	int ret;

	/* Write the CMDCTRL register without starting execution. */
	writel(reg, reg_base + CQSPI_REG_CMDCTRL);
	/* Start execution. */
	reg |= CQSPI_REG_CMDCTRL_EXECUTE_MASK;
	writel(reg, reg_base + CQSPI_REG_CMDCTRL);

	/* Poll for completion. */
	ret = cqspi_wait_for_bit(cqspi->ddata, reg_base + CQSPI_REG_CMDCTRL,
				 CQSPI_REG_CMDCTRL_INPROGRESS_MASK, 1, true);
	if (ret) {
		dev_err(&cqspi->pdev->dev,
			"Flash command execution timed out.\n");
		return ret;
	}

	/* Poll QSPI idle status. */
	return cqspi_wait_idle(cqspi);
}

static int cqspi_setup_opcode_ext(struct cqspi_flash_pdata *f_pdata,
				  const struct spi_mem_op *op,
				  unsigned int shift)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	unsigned int reg;
	u8 ext;

	if (op->cmd.nbytes != 2)
		return -EINVAL;

	/* Opcode extension is the LSB. */
	ext = op->cmd.opcode & 0xff;

	reg = readl(reg_base + CQSPI_REG_OP_EXT_LOWER);
	reg &= ~(0xff << shift);
	reg |= ext << shift;
	writel(reg, reg_base + CQSPI_REG_OP_EXT_LOWER);

	return 0;
}

static int cqspi_enable_dtr(struct cqspi_flash_pdata *f_pdata,
			    const struct spi_mem_op *op, unsigned int shift)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	unsigned int reg;
	int ret;

	reg = readl(reg_base + CQSPI_REG_CONFIG);

	/*
	 * We enable dual byte opcode here. The callers have to set up the
	 * extension opcode based on which type of operation it is.
	 */
	if (op->cmd.dtr) {
		reg |= CQSPI_REG_CONFIG_DTR_PROTO;
		reg |= CQSPI_REG_CONFIG_DUAL_OPCODE;

		/* Set up the command opcode extension. */
		ret = cqspi_setup_opcode_ext(f_pdata, op, shift);
		if (ret)
			return ret;
	} else {
		unsigned int mask = CQSPI_REG_CONFIG_DTR_PROTO | CQSPI_REG_CONFIG_DUAL_OPCODE;
		/* Shortcut if DTR is already disabled. */
		if ((reg & mask) == 0)
			return 0;
		reg &= ~mask;
	}

	writel(reg, reg_base + CQSPI_REG_CONFIG);

	return cqspi_wait_idle(cqspi);
}

static int cqspi_command_read(struct cqspi_flash_pdata *f_pdata,
			      const struct spi_mem_op *op)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	u8 *rxbuf = op->data.buf.in;
	u8 opcode;
	size_t n_rx = op->data.nbytes;
	unsigned int rdreg;
	unsigned int reg;
	unsigned int dummy_clk;
	size_t read_len;
	int status;

	status = cqspi_enable_dtr(f_pdata, op, CQSPI_REG_OP_EXT_STIG_LSB);
	if (status)
		return status;

	if (!n_rx || n_rx > CQSPI_STIG_DATA_LEN_MAX || !rxbuf) {
		dev_err(&cqspi->pdev->dev,
			"Invalid input argument, len %zu rxbuf 0x%p\n",
			n_rx, rxbuf);
		return -EINVAL;
	}

	if (op->cmd.dtr)
		opcode = op->cmd.opcode >> 8;
	else
		opcode = op->cmd.opcode;

	reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;

	rdreg = cqspi_calc_rdreg(op);
	writel(rdreg, reg_base + CQSPI_REG_RD_INSTR);

	dummy_clk = cqspi_calc_dummy(op);
	if (dummy_clk > CQSPI_DUMMY_CLKS_MAX)
		return -EOPNOTSUPP;

	if (dummy_clk)
		reg |= (dummy_clk & CQSPI_REG_CMDCTRL_DUMMY_MASK)
			<< CQSPI_REG_CMDCTRL_DUMMY_LSB;

	reg |= (0x1 << CQSPI_REG_CMDCTRL_RD_EN_LSB);

	/* 0 means 1 byte. */
	reg |= (((n_rx - 1) & CQSPI_REG_CMDCTRL_RD_BYTES_MASK)
		<< CQSPI_REG_CMDCTRL_RD_BYTES_LSB);

	/* Set up the ADDR bit field. */
	if (op->addr.nbytes) {
		reg |= (0x1 << CQSPI_REG_CMDCTRL_ADDR_EN_LSB);
		reg |= ((op->addr.nbytes - 1) &
			CQSPI_REG_CMDCTRL_ADD_BYTES_MASK)
			<< CQSPI_REG_CMDCTRL_ADD_BYTES_LSB;

		writel(op->addr.val, reg_base + CQSPI_REG_CMDADDRESS);
	}

	status = cqspi_exec_flash_cmd(cqspi, reg);
	if (status)
		return status;

	reg = readl(reg_base + CQSPI_REG_CMDREADDATALOWER);

	/* Put the read value into rx_buf */
	read_len = (n_rx > 4) ? 4 : n_rx;
	memcpy(rxbuf, &reg, read_len);
	rxbuf += read_len;

	if (n_rx > 4) {
		reg = readl(reg_base + CQSPI_REG_CMDREADDATAUPPER);

		read_len = n_rx - read_len;
		memcpy(rxbuf, &reg, read_len);
	}

	/* Reset the CMD_CTRL register once the command read completes. */
	writel(0, reg_base + CQSPI_REG_CMDCTRL);

	return 0;
}

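/*
 * Added commentary (data-packing example): STIG reads return at most
 * CQSPI_STIG_DATA_LEN_MAX (8) bytes split across CMDREADDATALOWER/UPPER.
 * For a 6-byte read, bytes 0-3 land in the LOWER register and bytes 4-5
 * in the low half of UPPER, which is why the second memcpy() above copies
 * n_rx - 4 bytes.
 */
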
static int cqspi_command_write(struct cqspi_flash_pdata *f_pdata,
			       const struct spi_mem_op *op)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	u8 opcode;
	const u8 *txbuf = op->data.buf.out;
	size_t n_tx = op->data.nbytes;
	unsigned int reg;
	unsigned int data;
	size_t write_len;
	int ret;

	ret = cqspi_enable_dtr(f_pdata, op, CQSPI_REG_OP_EXT_STIG_LSB);
	if (ret)
		return ret;

	if (n_tx > CQSPI_STIG_DATA_LEN_MAX || (n_tx && !txbuf)) {
		dev_err(&cqspi->pdev->dev,
			"Invalid input argument, cmdlen %zu txbuf 0x%p\n",
			n_tx, txbuf);
		return -EINVAL;
	}

	reg = cqspi_calc_rdreg(op);
	writel(reg, reg_base + CQSPI_REG_RD_INSTR);

	if (op->cmd.dtr)
		opcode = op->cmd.opcode >> 8;
	else
		opcode = op->cmd.opcode;

	reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;

	if (op->addr.nbytes) {
		reg |= (0x1 << CQSPI_REG_CMDCTRL_ADDR_EN_LSB);
		reg |= ((op->addr.nbytes - 1) &
			CQSPI_REG_CMDCTRL_ADD_BYTES_MASK)
			<< CQSPI_REG_CMDCTRL_ADD_BYTES_LSB;

		writel(op->addr.val, reg_base + CQSPI_REG_CMDADDRESS);
	}

	if (n_tx) {
		reg |= (0x1 << CQSPI_REG_CMDCTRL_WR_EN_LSB);
		reg |= ((n_tx - 1) & CQSPI_REG_CMDCTRL_WR_BYTES_MASK)
			<< CQSPI_REG_CMDCTRL_WR_BYTES_LSB;
		data = 0;
		write_len = (n_tx > 4) ? 4 : n_tx;
		memcpy(&data, txbuf, write_len);
		txbuf += write_len;
		writel(data, reg_base + CQSPI_REG_CMDWRITEDATALOWER);

		if (n_tx > 4) {
			data = 0;
			write_len = n_tx - 4;
			memcpy(&data, txbuf, write_len);
			writel(data, reg_base + CQSPI_REG_CMDWRITEDATAUPPER);
		}
	}

	ret = cqspi_exec_flash_cmd(cqspi, reg);

	/* Reset the CMD_CTRL register once the command write completes. */
	writel(0, reg_base + CQSPI_REG_CMDCTRL);

	return ret;
}

static int cqspi_read_setup(struct cqspi_flash_pdata *f_pdata,
			    const struct spi_mem_op *op)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	unsigned int dummy_clk = 0;
	unsigned int reg;
	int ret;
	u8 opcode;

	ret = cqspi_enable_dtr(f_pdata, op, CQSPI_REG_OP_EXT_READ_LSB);
	if (ret)
		return ret;

	if (op->cmd.dtr)
		opcode = op->cmd.opcode >> 8;
	else
		opcode = op->cmd.opcode;

	reg = opcode << CQSPI_REG_RD_INSTR_OPCODE_LSB;
	reg |= cqspi_calc_rdreg(op);

	/* Set up dummy clock cycles. */
	dummy_clk = cqspi_calc_dummy(op);

	if (dummy_clk > CQSPI_DUMMY_CLKS_MAX)
		return -EOPNOTSUPP;

	if (dummy_clk)
		reg |= (dummy_clk & CQSPI_REG_RD_INSTR_DUMMY_MASK)
			<< CQSPI_REG_RD_INSTR_DUMMY_LSB;

	writel(reg, reg_base + CQSPI_REG_RD_INSTR);

	/* Set address width */
	reg = readl(reg_base + CQSPI_REG_SIZE);
	reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
	reg |= (op->addr.nbytes - 1);
	writel(reg, reg_base + CQSPI_REG_SIZE);
	return 0;
}

static int cqspi_indirect_read_execute(struct cqspi_flash_pdata *f_pdata,
				       u8 *rxbuf, loff_t from_addr,
				       const size_t n_rx)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	bool use_irq = !(cqspi->ddata && cqspi->ddata->quirks & CQSPI_RD_NO_IRQ);
	struct device *dev = &cqspi->pdev->dev;
	void __iomem *reg_base = cqspi->iobase;
	void __iomem *ahb_base = cqspi->ahb_base;
	unsigned int remaining = n_rx;
	unsigned int mod_bytes = n_rx % 4;
	unsigned int bytes_to_read = 0;
	u8 *rxbuf_end = rxbuf + n_rx;
	int ret = 0;

	if (!refcount_read(&cqspi->refcount))
		return -ENODEV;

	writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR);
	writel(remaining, reg_base + CQSPI_REG_INDIRECTRDBYTES);

	/* Clear all interrupts. */
	writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);

	/*
	 * On the SoCFPGA platform, reading the SRAM is slow due to a
	 * hardware limitation and causes a read-interrupt storm on the
	 * CPU. Enable only the watermark interrupt here and disable all
	 * read interrupts later, so the "bytes to read" loop runs with
	 * read interrupts off for maximum performance.
	 */
	if (use_irq && cqspi->slow_sram)
		writel(CQSPI_REG_IRQ_WATERMARK, reg_base + CQSPI_REG_IRQMASK);
	else if (use_irq)
		writel(CQSPI_IRQ_MASK_RD, reg_base + CQSPI_REG_IRQMASK);
	else
		writel(0, reg_base + CQSPI_REG_IRQMASK);

	reinit_completion(&cqspi->transfer_complete);
	writel(CQSPI_REG_INDIRECTRD_START_MASK,
	       reg_base + CQSPI_REG_INDIRECTRD);

	while (remaining > 0) {
		if (use_irq &&
		    !wait_for_completion_timeout(&cqspi->transfer_complete,
						 msecs_to_jiffies(CQSPI_READ_TIMEOUT_MS)))
			ret = -ETIMEDOUT;

		/*
		 * Disable all read interrupts until
		 * we are out of "bytes to read".
		 */
		if (cqspi->slow_sram)
			writel(0x0, reg_base + CQSPI_REG_IRQMASK);

		bytes_to_read = cqspi_get_rd_sram_level(cqspi);

		if (ret && bytes_to_read == 0) {
			dev_err(dev, "Indirect read timeout, no bytes\n");
			goto failrd;
		}

		while (bytes_to_read != 0) {
			unsigned int word_remain = round_down(remaining, 4);

			bytes_to_read *= cqspi->fifo_width;
			bytes_to_read = bytes_to_read > remaining ?
					remaining : bytes_to_read;
			bytes_to_read = round_down(bytes_to_read, 4);
			/* Read 4 byte word chunks then single bytes */
			if (bytes_to_read) {
				ioread32_rep(ahb_base, rxbuf,
					     (bytes_to_read / 4));
			} else if (!word_remain && mod_bytes) {
				unsigned int temp = ioread32(ahb_base);

				bytes_to_read = mod_bytes;
				memcpy(rxbuf, &temp, min((unsigned int)
							 (rxbuf_end - rxbuf),
							 bytes_to_read));
			}
			rxbuf += bytes_to_read;
			remaining -= bytes_to_read;
			bytes_to_read = cqspi_get_rd_sram_level(cqspi);
		}

		if (use_irq && remaining > 0) {
			reinit_completion(&cqspi->transfer_complete);
			if (cqspi->slow_sram)
				writel(CQSPI_REG_IRQ_WATERMARK, reg_base + CQSPI_REG_IRQMASK);
		}
	}

	/* Check indirect done status */
	ret = cqspi_wait_for_bit(cqspi->ddata, reg_base + CQSPI_REG_INDIRECTRD,
				 CQSPI_REG_INDIRECTRD_DONE_MASK, 0, true);
	if (ret) {
		dev_err(dev, "Indirect read completion error (%i)\n", ret);
		goto failrd;
	}

	/* Disable interrupt */
	writel(0, reg_base + CQSPI_REG_IRQMASK);

	/* Clear indirect completion status */
	writel(CQSPI_REG_INDIRECTRD_DONE_MASK, reg_base + CQSPI_REG_INDIRECTRD);

	return 0;

failrd:
	/* Disable interrupt */
	writel(0, reg_base + CQSPI_REG_IRQMASK);

	/* Cancel the indirect read */
	writel(CQSPI_REG_INDIRECTRD_CANCEL_MASK,
	       reg_base + CQSPI_REG_INDIRECTRD);
	return ret;
}

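/*
 * Added commentary (sizing example): with a 4-byte FIFO width and an SRAM
 * read level of 32 words, one pass of the inner loop above drains
 * 32 * 4 = 128 bytes, rounded down to whole 32-bit words; a final
 * sub-word residue is handled by the single ioread32() path.
 */
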
static void cqspi_device_reset(struct cqspi_st *cqspi)
{
	u32 reg;

	reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
	reg |= CQSPI_REG_CONFIG_RESET_CFG_FLD_MASK;
	writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);
	/*
	 * NOTE: Delay timing implementation is derived from
	 * spi_nor_hw_reset()
	 */
	writel(reg & ~CQSPI_REG_CONFIG_RESET_PIN_FLD_MASK, cqspi->iobase + CQSPI_REG_CONFIG);
	usleep_range(1, 5);
	writel(reg | CQSPI_REG_CONFIG_RESET_PIN_FLD_MASK, cqspi->iobase + CQSPI_REG_CONFIG);
	usleep_range(100, 150);
	writel(reg & ~CQSPI_REG_CONFIG_RESET_PIN_FLD_MASK, cqspi->iobase + CQSPI_REG_CONFIG);
	usleep_range(1000, 1200);
}

static void cqspi_controller_enable(struct cqspi_st *cqspi, bool enable)
{
	void __iomem *reg_base = cqspi->iobase;
	unsigned int reg;

	reg = readl(reg_base + CQSPI_REG_CONFIG);

	if (enable)
		reg |= CQSPI_REG_CONFIG_ENABLE_MASK;
	else
		reg &= ~CQSPI_REG_CONFIG_ENABLE_MASK;

	writel(reg, reg_base + CQSPI_REG_CONFIG);
}

static int cqspi_versal_indirect_read_dma(struct cqspi_flash_pdata *f_pdata,
					  u_char *rxbuf, loff_t from_addr,
					  size_t n_rx)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	struct device *dev = &cqspi->pdev->dev;
	void __iomem *reg_base = cqspi->iobase;
	u32 reg, bytes_to_dma;
	loff_t addr = from_addr;
	void *buf = rxbuf;
	dma_addr_t dma_addr;
	u8 bytes_rem;
	int ret = 0;

	bytes_rem = n_rx % 4;
	bytes_to_dma = (n_rx - bytes_rem);

	if (!bytes_to_dma)
		goto nondmard;

	ret = zynqmp_pm_ospi_mux_select(cqspi->pd_dev_id, PM_OSPI_MUX_SEL_DMA);
	if (ret)
		return ret;

	cqspi_controller_enable(cqspi, 0);

	reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
	reg |= CQSPI_REG_CONFIG_DMA_MASK;
	writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);

	cqspi_controller_enable(cqspi, 1);

	dma_addr = dma_map_single(dev, rxbuf, bytes_to_dma, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		dev_err(dev, "dma mapping failed\n");
		return -ENOMEM;
	}

	writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR);
	writel(bytes_to_dma, reg_base + CQSPI_REG_INDIRECTRDBYTES);
	writel(CQSPI_REG_VERSAL_ADDRRANGE_WIDTH_VAL,
	       reg_base + CQSPI_REG_INDTRIG_ADDRRANGE);

	/* Clear all interrupts. */
	writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);

	/* Enable DMA done interrupt */
	writel(CQSPI_REG_VERSAL_DMA_DST_DONE_MASK,
	       reg_base + CQSPI_REG_VERSAL_DMA_DST_I_EN);

	/* Default DMA periph configuration */
	writel(CQSPI_REG_VERSAL_DMA_VAL, reg_base + CQSPI_REG_DMA);

	/* Configure DMA Dst address */
	writel(lower_32_bits(dma_addr),
	       reg_base + CQSPI_REG_VERSAL_DMA_DST_ADDR);
	writel(upper_32_bits(dma_addr),
	       reg_base + CQSPI_REG_VERSAL_DMA_DST_ADDR_MSB);

	/* Configure DMA Src address */
	writel(cqspi->trigger_address, reg_base +
	       CQSPI_REG_VERSAL_DMA_SRC_ADDR);

	/* Set DMA destination size */
	writel(bytes_to_dma, reg_base + CQSPI_REG_VERSAL_DMA_DST_SIZE);

	/* Set DMA destination control */
	writel(CQSPI_REG_VERSAL_DMA_DST_CTRL_VAL,
	       reg_base + CQSPI_REG_VERSAL_DMA_DST_CTRL);

	writel(CQSPI_REG_INDIRECTRD_START_MASK,
	       reg_base + CQSPI_REG_INDIRECTRD);

	reinit_completion(&cqspi->transfer_complete);

	if (!wait_for_completion_timeout(&cqspi->transfer_complete,
					 msecs_to_jiffies(max_t(size_t, bytes_to_dma, 500)))) {
		ret = -ETIMEDOUT;
		goto failrd;
	}

	/* Disable DMA interrupt */
	writel(0x0, cqspi->iobase + CQSPI_REG_VERSAL_DMA_DST_I_DIS);

	/* Clear indirect completion status */
	writel(CQSPI_REG_INDIRECTRD_DONE_MASK,
	       cqspi->iobase + CQSPI_REG_INDIRECTRD);
	dma_unmap_single(dev, dma_addr, bytes_to_dma, DMA_FROM_DEVICE);

	cqspi_controller_enable(cqspi, 0);

	reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
	reg &= ~CQSPI_REG_CONFIG_DMA_MASK;
	writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);

	cqspi_controller_enable(cqspi, 1);

	ret = zynqmp_pm_ospi_mux_select(cqspi->pd_dev_id,
					PM_OSPI_MUX_SEL_LINEAR);
	if (ret)
		return ret;

nondmard:
	if (bytes_rem) {
		addr += bytes_to_dma;
		buf += bytes_to_dma;
		ret = cqspi_indirect_read_execute(f_pdata, buf, addr,
						  bytes_rem);
		if (ret)
			return ret;
	}

	return 0;

failrd:
	/* Disable DMA interrupt */
	writel(0x0, reg_base + CQSPI_REG_VERSAL_DMA_DST_I_DIS);

	/* Cancel the indirect read */
	writel(CQSPI_REG_INDIRECTRD_CANCEL_MASK,
	       reg_base + CQSPI_REG_INDIRECTRD);

	dma_unmap_single(dev, dma_addr, bytes_to_dma, DMA_FROM_DEVICE);

	reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
	reg &= ~CQSPI_REG_CONFIG_DMA_MASK;
	writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);

	zynqmp_pm_ospi_mux_select(cqspi->pd_dev_id, PM_OSPI_MUX_SEL_LINEAR);

	return ret;
}

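/*
 * Added commentary (alignment example): the Versal DMA path transfers only
 * whole 32-bit words, so a 4099-byte read is split into a 4096-byte DMA
 * transfer plus a 3-byte indirect (PIO) read via the nondmard path above.
 * Buffers that fail the CQSPI_DMA_UNALIGN check in cqspi_read() never
 * reach this function and use indirect reads entirely.
 */
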
static int cqspi_write_setup(struct cqspi_flash_pdata *f_pdata,
			     const struct spi_mem_op *op)
{
	unsigned int reg;
	int ret;
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	u8 opcode;

	ret = cqspi_enable_dtr(f_pdata, op, CQSPI_REG_OP_EXT_WRITE_LSB);
	if (ret)
		return ret;

	if (op->cmd.dtr)
		opcode = op->cmd.opcode >> 8;
	else
		opcode = op->cmd.opcode;

	/* Set opcode. */
	reg = opcode << CQSPI_REG_WR_INSTR_OPCODE_LSB;
	reg |= CQSPI_OP_WIDTH(op->data) << CQSPI_REG_WR_INSTR_TYPE_DATA_LSB;
	reg |= CQSPI_OP_WIDTH(op->addr) << CQSPI_REG_WR_INSTR_TYPE_ADDR_LSB;
	writel(reg, reg_base + CQSPI_REG_WR_INSTR);
	reg = cqspi_calc_rdreg(op);
	writel(reg, reg_base + CQSPI_REG_RD_INSTR);

	/*
	 * SPI NAND flashes require the address of the status register to be
	 * passed in the Read SR command. Also, some SPI NOR flashes like the
	 * Cypress Semper flash expect a 4-byte dummy address in the Read SR
	 * command in DTR mode.
	 *
	 * But this controller does not support an address phase in the Read
	 * SR command when doing auto-HW polling. So, disable write completion
	 * polling on the controller's side. spinand and spi-nor will take
	 * care of polling the status register.
	 */
	if (cqspi->wr_completion) {
		reg = readl(reg_base + CQSPI_REG_WR_COMPLETION_CTRL);
		reg |= CQSPI_REG_WR_DISABLE_AUTO_POLL;
		writel(reg, reg_base + CQSPI_REG_WR_COMPLETION_CTRL);
		/*
		 * DAC mode requires auto polling, as the flash needs to be
		 * polled for write completion in case of a bubble in the SPI
		 * transaction due to a slow CPU/DMA master.
		 */
		cqspi->use_direct_mode_wr = false;
	}

	reg = readl(reg_base + CQSPI_REG_SIZE);
	reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
	reg |= (op->addr.nbytes - 1);
	writel(reg, reg_base + CQSPI_REG_SIZE);
	return 0;
}

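/*
 * Added commentary (encoding note): the device size register stores the
 * address width as "bytes - 1", so a 3-byte addressed flash programs 2 and
 * a 4-byte addressed flash programs 3 into CQSPI_REG_SIZE_ADDRESS_MASK.
 */
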
static int cqspi_indirect_write_execute(struct cqspi_flash_pdata *f_pdata,
					loff_t to_addr, const u8 *txbuf,
					const size_t n_tx)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	struct device *dev = &cqspi->pdev->dev;
	void __iomem *reg_base = cqspi->iobase;
	unsigned int remaining = n_tx;
	unsigned int write_bytes;
	int ret;

	if (!refcount_read(&cqspi->refcount))
		return -ENODEV;

	writel(to_addr, reg_base + CQSPI_REG_INDIRECTWRSTARTADDR);
	writel(remaining, reg_base + CQSPI_REG_INDIRECTWRBYTES);

	/* Clear all interrupts. */
	writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);

	writel(CQSPI_IRQ_MASK_WR, reg_base + CQSPI_REG_IRQMASK);

	reinit_completion(&cqspi->transfer_complete);
	writel(CQSPI_REG_INDIRECTWR_START_MASK,
	       reg_base + CQSPI_REG_INDIRECTWR);
	/*
	 * As per 66AK2G02 TRM SPRUHY8F section 11.15.5.3 Indirect Access
	 * Controller programming sequence, a couple of cycles of
	 * QSPI_REF_CLK delay is required for the above bit to
	 * be internally synchronized by the QSPI module. Provide 5
	 * cycles of delay.
	 */
	if (cqspi->wr_delay)
		ndelay(cqspi->wr_delay);

	/*
	 * If a hazard exists between the APB and AHB interfaces, perform a
	 * dummy readback from the controller to ensure synchronization.
	 */
	if (cqspi->apb_ahb_hazard)
		readl(reg_base + CQSPI_REG_INDIRECTWR);

	while (remaining > 0) {
		size_t write_words, mod_bytes;

		write_bytes = remaining;
		write_words = write_bytes / 4;
		mod_bytes = write_bytes % 4;
		/* Write 4 bytes at a time then single bytes. */
		if (write_words) {
			iowrite32_rep(cqspi->ahb_base, txbuf, write_words);
			txbuf += (write_words * 4);
		}
		if (mod_bytes) {
			unsigned int temp = 0xFFFFFFFF;

			memcpy(&temp, txbuf, mod_bytes);
			iowrite32(temp, cqspi->ahb_base);
			txbuf += mod_bytes;
		}

		if (!wait_for_completion_timeout(&cqspi->transfer_complete,
						 msecs_to_jiffies(CQSPI_TIMEOUT_MS))) {
			dev_err(dev, "Indirect write timeout\n");
			ret = -ETIMEDOUT;
			goto failwr;
		}

		remaining -= write_bytes;

		if (remaining > 0)
			reinit_completion(&cqspi->transfer_complete);
	}

	/* Check indirect done status */
	ret = cqspi_wait_for_bit(cqspi->ddata, reg_base + CQSPI_REG_INDIRECTWR,
				 CQSPI_REG_INDIRECTWR_DONE_MASK, 0, false);
	if (ret) {
		dev_err(dev, "Indirect write completion error (%i)\n", ret);
		goto failwr;
	}

	/* Disable interrupt. */
	writel(0, reg_base + CQSPI_REG_IRQMASK);

	/* Clear indirect completion status */
	writel(CQSPI_REG_INDIRECTWR_DONE_MASK, reg_base + CQSPI_REG_INDIRECTWR);

	cqspi_wait_idle(cqspi);

	return 0;

failwr:
	/* Disable interrupt. */
	writel(0, reg_base + CQSPI_REG_IRQMASK);

	/* Cancel the indirect write */
	writel(CQSPI_REG_INDIRECTWR_CANCEL_MASK,
	       reg_base + CQSPI_REG_INDIRECTWR);
	return ret;
}

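/*
 * Added commentary (padding note): the trailing sub-word write above seeds
 * the scratch word with 0xFFFFFFFF before copying mod_bytes of payload.
 * The controller was programmed with the exact byte count in
 * INDIRECTWRBYTES, so only the requested bytes reach the flash; 0xFF is
 * presumably chosen as the pad value because it matches the erased state
 * of NOR flash and is harmless if ever clocked out.
 */
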
static void cqspi_chipselect(struct cqspi_flash_pdata *f_pdata)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	unsigned int chip_select = f_pdata->cs;
	unsigned int reg;

	reg = readl(reg_base + CQSPI_REG_CONFIG);
	if (cqspi->is_decoded_cs) {
		reg |= CQSPI_REG_CONFIG_DECODE_MASK;
	} else {
		reg &= ~CQSPI_REG_CONFIG_DECODE_MASK;

		/* Convert CS if without decoder.
		 * CS0 to 4b'1110
		 * CS1 to 4b'1101
		 * CS2 to 4b'1011
		 * CS3 to 4b'0111
		 */
		chip_select = 0xF & ~(1 << chip_select);
	}

	reg &= ~(CQSPI_REG_CONFIG_CHIPSELECT_MASK
		 << CQSPI_REG_CONFIG_CHIPSELECT_LSB);
	reg |= (chip_select & CQSPI_REG_CONFIG_CHIPSELECT_MASK)
		<< CQSPI_REG_CONFIG_CHIPSELECT_LSB;
	writel(reg, reg_base + CQSPI_REG_CONFIG);
}

static unsigned int calculate_ticks_for_ns(const unsigned int ref_clk_hz,
					   const unsigned int ns_val)
{
	unsigned int ticks;

	ticks = ref_clk_hz / 1000;	/* kHz */
	ticks = DIV_ROUND_UP(ticks * ns_val, 1000000);

	return ticks;
}

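/*
 * Added commentary (worked example): with a 500 MHz reference clock and
 * tshsl_ns = 50, calculate_ticks_for_ns() computes
 * DIV_ROUND_UP(500000 * 50, 1000000) = 25 reference-clock ticks.
 */
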
static void cqspi_delay(struct cqspi_flash_pdata *f_pdata)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *iobase = cqspi->iobase;
	const unsigned int ref_clk_hz = cqspi->master_ref_clk_hz;
	unsigned int tshsl, tchsh, tslch, tsd2d;
	unsigned int reg;
	unsigned int tsclk;

	/* calculate the number of ref ticks for one sclk tick */
	tsclk = DIV_ROUND_UP(ref_clk_hz, cqspi->sclk);

	tshsl = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tshsl_ns);
	/* this particular value must be at least one sclk */
	if (tshsl < tsclk)
		tshsl = tsclk;

	tchsh = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tchsh_ns);
	tslch = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tslch_ns);
	tsd2d = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tsd2d_ns);

	reg = (tshsl & CQSPI_REG_DELAY_TSHSL_MASK)
	       << CQSPI_REG_DELAY_TSHSL_LSB;
	reg |= (tchsh & CQSPI_REG_DELAY_TCHSH_MASK)
		<< CQSPI_REG_DELAY_TCHSH_LSB;
	reg |= (tslch & CQSPI_REG_DELAY_TSLCH_MASK)
		<< CQSPI_REG_DELAY_TSLCH_LSB;
	reg |= (tsd2d & CQSPI_REG_DELAY_TSD2D_MASK)
		<< CQSPI_REG_DELAY_TSD2D_LSB;
	writel(reg, iobase + CQSPI_REG_DELAY);
}

static void cqspi_config_baudrate_div(struct cqspi_st *cqspi)
{
	const unsigned int ref_clk_hz = cqspi->master_ref_clk_hz;
	void __iomem *reg_base = cqspi->iobase;
	u32 reg, div;

	/* Recalculate the baudrate divisor based on QSPI specification. */
	div = DIV_ROUND_UP(ref_clk_hz, 2 * cqspi->sclk) - 1;

	/* Maximum baud divisor */
	if (div > CQSPI_REG_CONFIG_BAUD_MASK) {
		div = CQSPI_REG_CONFIG_BAUD_MASK;
		dev_warn(&cqspi->pdev->dev,
			 "Unable to adjust clock <= %d hz. Reduced to %d hz\n",
			 cqspi->sclk, ref_clk_hz / ((div + 1) * 2));
	}

	reg = readl(reg_base + CQSPI_REG_CONFIG);
	reg &= ~(CQSPI_REG_CONFIG_BAUD_MASK << CQSPI_REG_CONFIG_BAUD_LSB);
	reg |= (div & CQSPI_REG_CONFIG_BAUD_MASK) << CQSPI_REG_CONFIG_BAUD_LSB;
	writel(reg, reg_base + CQSPI_REG_CONFIG);
}

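/*
 * Added commentary (worked example): for ref_clk = 500 MHz and a requested
 * sclk of 20 MHz, div = DIV_ROUND_UP(500, 2 * 20) - 1 = 12, giving an
 * actual SCLK of 500 / (2 * (12 + 1)) ~= 19.2 MHz. The 4-bit divisor field
 * saturates at 15, i.e. ref_clk / 32 is the slowest achievable rate.
 */
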
static void cqspi_readdata_capture(struct cqspi_st *cqspi,
				   const bool bypass,
				   const unsigned int delay)
{
	void __iomem *reg_base = cqspi->iobase;
	unsigned int reg;

	reg = readl(reg_base + CQSPI_REG_READCAPTURE);

	if (bypass)
		reg |= (1 << CQSPI_REG_READCAPTURE_BYPASS_LSB);
	else
		reg &= ~(1 << CQSPI_REG_READCAPTURE_BYPASS_LSB);

	reg &= ~(CQSPI_REG_READCAPTURE_DELAY_MASK
		 << CQSPI_REG_READCAPTURE_DELAY_LSB);

	reg |= (delay & CQSPI_REG_READCAPTURE_DELAY_MASK)
		<< CQSPI_REG_READCAPTURE_DELAY_LSB;

	writel(reg, reg_base + CQSPI_REG_READCAPTURE);
}

static void cqspi_configure(struct cqspi_flash_pdata *f_pdata,
			    unsigned long sclk)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	int switch_cs = (cqspi->current_cs != f_pdata->cs);
	int switch_ck = (cqspi->sclk != sclk);

	if (switch_cs || switch_ck)
		cqspi_controller_enable(cqspi, 0);

	/* Switch chip select. */
	if (switch_cs) {
		cqspi->current_cs = f_pdata->cs;
		cqspi_chipselect(f_pdata);
	}

	/* Setup baudrate divisor and delays */
	if (switch_ck) {
		cqspi->sclk = sclk;
		cqspi_config_baudrate_div(cqspi);
		cqspi_delay(f_pdata);
		cqspi_readdata_capture(cqspi, !cqspi->rclk_en,
				       f_pdata->read_delay);
	}

	if (switch_cs || switch_ck)
		cqspi_controller_enable(cqspi, 1);
}

static ssize_t cqspi_write(struct cqspi_flash_pdata *f_pdata,
			   const struct spi_mem_op *op)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	loff_t to = op->addr.val;
	size_t len = op->data.nbytes;
	const u_char *buf = op->data.buf.out;
	int ret;

	ret = cqspi_write_setup(f_pdata, op);
	if (ret)
		return ret;

	/*
	 * Some flashes like the Cypress Semper flash expect a dummy 4-byte
	 * address (all 0s) with the read status register command in DTR mode.
	 * But this controller does not support sending dummy address bytes to
	 * the flash when it is polling the write completion register in DTR
	 * mode. So, we cannot use direct mode when in DTR mode for writing
	 * data.
	 */
	if (!op->cmd.dtr && cqspi->use_direct_mode &&
	    cqspi->use_direct_mode_wr && ((to + len) <= cqspi->ahb_size)) {
		memcpy_toio(cqspi->ahb_base + to, buf, len);
		return cqspi_wait_idle(cqspi);
	}

	return cqspi_indirect_write_execute(f_pdata, to, buf, len);
}

static void cqspi_rx_dma_callback(void *param)
{
	struct cqspi_st *cqspi = param;

	complete(&cqspi->rx_dma_complete);
}

static int cqspi_direct_read_execute(struct cqspi_flash_pdata *f_pdata,
				     u_char *buf, loff_t from, size_t len)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	struct device *dev = &cqspi->pdev->dev;
	enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
	dma_addr_t dma_src = (dma_addr_t)cqspi->mmap_phys_base + from;
	int ret = 0;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;
	dma_addr_t dma_dst;
	struct device *ddev;

	if (!cqspi->rx_chan || !virt_addr_valid(buf)) {
		memcpy_fromio(buf, cqspi->ahb_base + from, len);
		return 0;
	}

	ddev = cqspi->rx_chan->device->dev;
	dma_dst = dma_map_single(ddev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(ddev, dma_dst)) {
		dev_err(dev, "dma mapping failed\n");
		return -ENOMEM;
	}
	tx = dmaengine_prep_dma_memcpy(cqspi->rx_chan, dma_dst, dma_src,
				       len, flags);
	if (!tx) {
		dev_err(dev, "device_prep_dma_memcpy error\n");
		ret = -EIO;
		goto err_unmap;
	}

	tx->callback = cqspi_rx_dma_callback;
	tx->callback_param = cqspi;
	cookie = tx->tx_submit(tx);
	reinit_completion(&cqspi->rx_dma_complete);

	ret = dma_submit_error(cookie);
	if (ret) {
		dev_err(dev, "dma_submit_error %d\n", cookie);
		ret = -EIO;
		goto err_unmap;
	}

	dma_async_issue_pending(cqspi->rx_chan);
	if (!wait_for_completion_timeout(&cqspi->rx_dma_complete,
					 msecs_to_jiffies(max_t(size_t, len, 500)))) {
		dmaengine_terminate_sync(cqspi->rx_chan);
		dev_err(dev, "DMA wait_for_completion_timeout\n");
		ret = -ETIMEDOUT;
		goto err_unmap;
	}

err_unmap:
	dma_unmap_single(ddev, dma_dst, len, DMA_FROM_DEVICE);

	return ret;
}

static ssize_t cqspi_read(struct cqspi_flash_pdata *f_pdata,
			  const struct spi_mem_op *op)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	const struct cqspi_driver_platdata *ddata = cqspi->ddata;
	loff_t from = op->addr.val;
	size_t len = op->data.nbytes;
	u_char *buf = op->data.buf.in;
	u64 dma_align = (u64)(uintptr_t)buf;
	int ret;

	ret = cqspi_read_setup(f_pdata, op);
	if (ret)
		return ret;

	if (cqspi->use_direct_mode && ((from + len) <= cqspi->ahb_size))
		return cqspi_direct_read_execute(f_pdata, buf, from, len);

	if (cqspi->use_dma_read && ddata && ddata->indirect_read_dma &&
	    virt_addr_valid(buf) && ((dma_align & CQSPI_DMA_UNALIGN) == 0))
		return ddata->indirect_read_dma(f_pdata, buf, from, len);

	return cqspi_indirect_read_execute(f_pdata, buf, from, len);
}

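/*
 * Added commentary (read-path selection): cqspi_read() prefers
 * memory-mapped DAC access when the AHB window covers the whole range,
 * then the SoC-specific DMA hook for 4-byte-aligned kernel buffers, and
 * falls back to indirect (SRAM-based) reads otherwise.
 */
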
static int cqspi_mem_process(struct spi_mem *mem, const struct spi_mem_op *op)
{
	struct cqspi_st *cqspi = spi_controller_get_devdata(mem->spi->controller);
	struct cqspi_flash_pdata *f_pdata;

	f_pdata = &cqspi->f_pdata[spi_get_chipselect(mem->spi, 0)];
	cqspi_configure(f_pdata, op->max_freq);

	if (op->data.dir == SPI_MEM_DATA_IN && op->data.buf.in) {
		/*
		 * Reads in DAC mode are a minimum of 4 bytes, which some
		 * flash devices cannot handle during register reads, so
		 * prefer STIG mode for such small reads.
		 */
		if (!op->addr.nbytes ||
		    (op->data.nbytes <= CQSPI_STIG_DATA_LEN_MAX &&
		     !cqspi->disable_stig_mode))
			return cqspi_command_read(f_pdata, op);

		return cqspi_read(f_pdata, op);
	}

	if (!op->addr.nbytes || !op->data.buf.out)
		return cqspi_command_write(f_pdata, op);

	return cqspi_write(f_pdata, op);
}

static int cqspi_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	int ret;
	struct cqspi_st *cqspi = spi_controller_get_devdata(mem->spi->controller);
	struct device *dev = &cqspi->pdev->dev;

	if (refcount_read(&cqspi->inflight_ops) == 0)
		return -ENODEV;

	ret = pm_runtime_resume_and_get(dev);
	if (ret) {
		dev_err(&mem->spi->dev, "resume failed with %d\n", ret);
		return ret;
	}

	if (!refcount_read(&cqspi->refcount))
		return -EBUSY;

	refcount_inc(&cqspi->inflight_ops);

	if (!refcount_read(&cqspi->refcount)) {
		if (refcount_read(&cqspi->inflight_ops))
			refcount_dec(&cqspi->inflight_ops);
		return -EBUSY;
	}

	ret = cqspi_mem_process(mem, op);

	pm_runtime_put_autosuspend(dev);

	if (ret)
		dev_err(&mem->spi->dev, "operation failed with %d\n", ret);

	if (refcount_read(&cqspi->inflight_ops) > 1)
		refcount_dec(&cqspi->inflight_ops);

	return ret;
}

static bool cqspi_supports_mem_op(struct spi_mem *mem,
				  const struct spi_mem_op *op)
{
	bool all_true, all_false;

	/*
	 * op->dummy.dtr is required for converting nbytes into ncycles.
	 * Also, don't check the dtr field of the op phase having zero nbytes.
	 */
	all_true = op->cmd.dtr &&
		   (!op->addr.nbytes || op->addr.dtr) &&
		   (!op->dummy.nbytes || op->dummy.dtr) &&
		   (!op->data.nbytes || op->data.dtr);

	all_false = !op->cmd.dtr && !op->addr.dtr && !op->dummy.dtr &&
		    !op->data.dtr;

	if (all_true) {
		/* Right now we only support 8-8-8 DTR mode. */
		if (op->cmd.nbytes && op->cmd.buswidth != 8)
			return false;
		if (op->addr.nbytes && op->addr.buswidth != 8)
			return false;
		if (op->data.nbytes && op->data.buswidth != 8)
			return false;
	} else if (!all_false) {
		/* Mixed DTR modes are not supported. */
		return false;
	}

	return spi_mem_default_supports_op(mem, op);
}

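/*
 * Added commentary (illustrative op): an 8D-8D-8D Octal DTR read whose
 * two-byte command (e.g. 0xEE11, an example value) has op->cmd.nbytes == 2
 * passes the "all phases DTR, all buswidth 8" check above, while an op
 * mixing a DTR command with a non-DTR data phase of nonzero length is
 * rejected as a mixed mode.
 */
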
static int cqspi_of_get_flash_pdata(struct platform_device *pdev,
				    struct cqspi_flash_pdata *f_pdata,
				    struct device_node *np)
{
	if (of_property_read_u32(np, "cdns,read-delay", &f_pdata->read_delay)) {
		dev_err(&pdev->dev, "couldn't determine read-delay\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "cdns,tshsl-ns", &f_pdata->tshsl_ns)) {
		dev_err(&pdev->dev, "couldn't determine tshsl-ns\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "cdns,tsd2d-ns", &f_pdata->tsd2d_ns)) {
		dev_err(&pdev->dev, "couldn't determine tsd2d-ns\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "cdns,tchsh-ns", &f_pdata->tchsh_ns)) {
		dev_err(&pdev->dev, "couldn't determine tchsh-ns\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "cdns,tslch-ns", &f_pdata->tslch_ns)) {
		dev_err(&pdev->dev, "couldn't determine tslch-ns\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "spi-max-frequency", &f_pdata->clk_rate)) {
		dev_err(&pdev->dev, "couldn't determine spi-max-frequency\n");
		return -ENXIO;
	}

	return 0;
}

static int cqspi_of_get_pdata(struct cqspi_st *cqspi)
{
	struct device *dev = &cqspi->pdev->dev;
	struct device_node *np = dev->of_node;
	u32 id[2];

	cqspi->is_decoded_cs = of_property_read_bool(np, "cdns,is-decoded-cs");

	if (of_property_read_u32(np, "cdns,fifo-depth", &cqspi->fifo_depth)) {
		/* Zero signals FIFO depth should be runtime detected. */
		cqspi->fifo_depth = 0;
	}

	if (of_property_read_u32(np, "cdns,fifo-width", &cqspi->fifo_width)) {
		dev_err(dev, "couldn't determine fifo-width\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "cdns,trigger-address",
				 &cqspi->trigger_address)) {
		dev_err(dev, "couldn't determine trigger-address\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "num-cs", &cqspi->num_chipselect))
		cqspi->num_chipselect = CQSPI_MAX_CHIPSELECT;

	cqspi->rclk_en = of_property_read_bool(np, "cdns,rclk-en");

	if (!of_property_read_u32_array(np, "power-domains", id,
					ARRAY_SIZE(id)))
		cqspi->pd_dev_id = id[1];

	return 0;
}

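/*
 * Added commentary (illustrative devicetree fragment; the property values
 * below are examples only, the property names come from
 * cqspi_of_get_flash_pdata() above):
 *
 *	flash@0 {
 *		compatible = "jedec,spi-nor";
 *		reg = <0>;
 *		spi-max-frequency = <25000000>;
 *		cdns,read-delay = <4>;
 *		cdns,tshsl-ns = <60>;
 *		cdns,tsd2d-ns = <60>;
 *		cdns,tchsh-ns = <60>;
 *		cdns,tslch-ns = <60>;
 *	};
 */
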
static void cqspi_controller_init(struct cqspi_st *cqspi)
{
	u32 reg;

	/* Configure the remap address register, no remap */
	writel(0, cqspi->iobase + CQSPI_REG_REMAP);

	/* Disable all interrupts. */
	writel(0, cqspi->iobase + CQSPI_REG_IRQMASK);

	/* Configure the SRAM split to 1:1. */
	writel(cqspi->fifo_depth / 2, cqspi->iobase + CQSPI_REG_SRAMPARTITION);

	/* Load indirect trigger address. */
	writel(cqspi->trigger_address,
	       cqspi->iobase + CQSPI_REG_INDIRECTTRIGGER);

	/* Program read watermark -- 1/2 of the FIFO. */
	writel(cqspi->fifo_depth * cqspi->fifo_width / 2,
	       cqspi->iobase + CQSPI_REG_INDIRECTRDWATERMARK);
	/* Program write watermark -- 1/8 of the FIFO. */
	writel(cqspi->fifo_depth * cqspi->fifo_width / 8,
	       cqspi->iobase + CQSPI_REG_INDIRECTWRWATERMARK);

	/* Disable direct access controller */
	if (!cqspi->use_direct_mode) {
		reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
		reg &= ~CQSPI_REG_CONFIG_ENB_DIR_ACC_CTRL;
		writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);
	}

	/* Enable DMA interface */
	if (cqspi->use_dma_read) {
		reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
		reg |= CQSPI_REG_CONFIG_DMA_MASK;
		writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);
	}
}

static void cqspi_controller_detect_fifo_depth(struct cqspi_st *cqspi)
{
	struct device *dev = &cqspi->pdev->dev;
	u32 reg, fifo_depth;

	/*
	 * Bits N-1:0 are writable while bits 31:N are read as zero, with 2^N
	 * the FIFO depth.
	 */
	writel(U32_MAX, cqspi->iobase + CQSPI_REG_SRAMPARTITION);
	reg = readl(cqspi->iobase + CQSPI_REG_SRAMPARTITION);
	fifo_depth = reg + 1;

	/* FIFO depth of zero means no value from devicetree was provided. */
	if (cqspi->fifo_depth == 0) {
		cqspi->fifo_depth = fifo_depth;
		dev_dbg(dev, "using FIFO depth of %u\n", fifo_depth);
	} else if (fifo_depth != cqspi->fifo_depth) {
		dev_warn(dev, "detected FIFO depth (%u) different from config (%u)\n",
			 fifo_depth, cqspi->fifo_depth);
	}
}

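/*
 * Added commentary (sizing example): with a detected FIFO depth of 128
 * words and a 4-byte FIFO width, cqspi_controller_init() splits the SRAM
 * 64/64 between read and write, sets the read watermark to fire at
 * 128 * 4 / 2 = 256 bytes, and the write watermark at 128 * 4 / 8 = 64
 * bytes.
 */
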
static int cqspi_request_mmap_dma(struct cqspi_st *cqspi)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	cqspi->rx_chan = dma_request_chan_by_mask(&mask);
	if (IS_ERR(cqspi->rx_chan)) {
		int ret = PTR_ERR(cqspi->rx_chan);

		cqspi->rx_chan = NULL;
		if (ret == -ENODEV) {
			/* DMA support is not mandatory */
			dev_info(&cqspi->pdev->dev, "No Rx DMA available\n");
			return 0;
		}

		return dev_err_probe(&cqspi->pdev->dev, ret, "No Rx DMA available\n");
	}
	init_completion(&cqspi->rx_dma_complete);

	return 0;
}

static const char *cqspi_get_name(struct spi_mem *mem)
{
	struct cqspi_st *cqspi = spi_controller_get_devdata(mem->spi->controller);
	struct device *dev = &cqspi->pdev->dev;

	return devm_kasprintf(dev, GFP_KERNEL, "%s.%d", dev_name(dev),
			      spi_get_chipselect(mem->spi, 0));
}

static const struct spi_controller_mem_ops cqspi_mem_ops = {
	.exec_op = cqspi_exec_mem_op,
	.get_name = cqspi_get_name,
	.supports_op = cqspi_supports_mem_op,
};

static const struct spi_controller_mem_caps cqspi_mem_caps = {
	.dtr = true,
	.per_op_freq = true,
};

static int cqspi_setup_flash(struct cqspi_st *cqspi)
{
	unsigned int max_cs = cqspi->num_chipselect - 1;
	struct platform_device *pdev = cqspi->pdev;
	struct device *dev = &pdev->dev;
	struct cqspi_flash_pdata *f_pdata;
	unsigned int cs;
	int ret;

	/* Get flash device data */
	for_each_available_child_of_node_scoped(dev->of_node, np) {
		ret = of_property_read_u32(np, "reg", &cs);
		if (ret) {
			dev_err(dev, "Couldn't determine chip select.\n");
			return ret;
		}

		if (cs >= cqspi->num_chipselect) {
			dev_err(dev, "Chip select %d out of range.\n", cs);
			return -EINVAL;
		} else if (cs < max_cs) {
			max_cs = cs;
		}

		f_pdata = &cqspi->f_pdata[cs];
		f_pdata->cqspi = cqspi;
		f_pdata->cs = cs;

		ret = cqspi_of_get_flash_pdata(pdev, f_pdata, np);
		if (ret)
			return ret;
	}

	cqspi->num_chipselect = max_cs + 1;
	return 0;
}

static int cqspi_jh7110_clk_init(struct platform_device *pdev, struct cqspi_st *cqspi)
{
	static struct clk_bulk_data qspiclk[] = {
		{ .id = "apb" },
		{ .id = "ahb" },
	};

	int ret = 0;

	ret = devm_clk_bulk_get(&pdev->dev, ARRAY_SIZE(qspiclk), qspiclk);
	if (ret) {
		dev_err(&pdev->dev, "%s: failed to get qspi clocks\n", __func__);
		return ret;
	}

	cqspi->clks[CLK_QSPI_APB] = qspiclk[0].clk;
	cqspi->clks[CLK_QSPI_AHB] = qspiclk[1].clk;

	ret = clk_prepare_enable(cqspi->clks[CLK_QSPI_APB]);
	if (ret) {
		dev_err(&pdev->dev, "%s: failed to enable CLK_QSPI_APB\n", __func__);
		return ret;
	}

	ret = clk_prepare_enable(cqspi->clks[CLK_QSPI_AHB]);
	if (ret) {
		dev_err(&pdev->dev, "%s: failed to enable CLK_QSPI_AHB\n", __func__);
		goto disable_apb_clk;
	}

	cqspi->is_jh7110 = true;

	return 0;

disable_apb_clk:
	clk_disable_unprepare(cqspi->clks[CLK_QSPI_APB]);

	return ret;
}

static void cqspi_jh7110_disable_clk(struct platform_device *pdev, struct cqspi_st *cqspi)
{
	clk_disable_unprepare(cqspi->clks[CLK_QSPI_AHB]);
	clk_disable_unprepare(cqspi->clks[CLK_QSPI_APB]);
}

static int cqspi_probe(struct platform_device *pdev)
{
	const struct cqspi_driver_platdata *ddata;
	struct reset_control *rstc, *rstc_ocp, *rstc_ref;
	struct device *dev = &pdev->dev;
	struct spi_controller *host;
	struct resource *res_ahb;
	struct cqspi_st *cqspi;
	int ret;
	int irq;

	host = devm_spi_alloc_host(&pdev->dev, sizeof(*cqspi));
	if (!host)
		return -ENOMEM;

	host->mode_bits = SPI_RX_QUAD | SPI_RX_DUAL;
	host->mem_ops = &cqspi_mem_ops;
	host->mem_caps = &cqspi_mem_caps;
	host->dev.of_node = pdev->dev.of_node;

	cqspi = spi_controller_get_devdata(host);

	cqspi->pdev = pdev;
	cqspi->host = host;
	cqspi->is_jh7110 = false;
	cqspi->ddata = ddata = of_device_get_match_data(dev);
	platform_set_drvdata(pdev, cqspi);

	/* Obtain configuration from OF. */
	ret = cqspi_of_get_pdata(cqspi);
	if (ret) {
		dev_err(dev, "Cannot get mandatory OF data.\n");
		return -ENODEV;
	}

	/* Obtain QSPI clock. */
	cqspi->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(cqspi->clk)) {
		dev_err(dev, "Cannot claim QSPI clock.\n");
		ret = PTR_ERR(cqspi->clk);
		return ret;
	}

	/* Obtain and remap controller address. */
	cqspi->iobase = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(cqspi->iobase)) {
		dev_err(dev, "Cannot remap controller address.\n");
		ret = PTR_ERR(cqspi->iobase);
		return ret;
	}

	/* Obtain and remap AHB address. */
	cqspi->ahb_base = devm_platform_get_and_ioremap_resource(pdev, 1, &res_ahb);
	if (IS_ERR(cqspi->ahb_base)) {
		dev_err(dev, "Cannot remap AHB address.\n");
		ret = PTR_ERR(cqspi->ahb_base);
		return ret;
	}
	cqspi->mmap_phys_base = (dma_addr_t)res_ahb->start;
	cqspi->ahb_size = resource_size(res_ahb);

	init_completion(&cqspi->transfer_complete);

	/* Obtain IRQ line. */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return -ENXIO;

	ret = pm_runtime_set_active(dev);
	if (ret)
		return ret;

	ret = clk_prepare_enable(cqspi->clk);
	if (ret) {
		dev_err(dev, "Cannot enable QSPI clock.\n");
		goto probe_clk_failed;
	}

	/* Obtain QSPI reset control */
	rstc = devm_reset_control_get_optional_exclusive(dev, "qspi");
	if (IS_ERR(rstc)) {
		ret = PTR_ERR(rstc);
		dev_err(dev, "Cannot get QSPI reset.\n");
		goto probe_reset_failed;
	}

	rstc_ocp = devm_reset_control_get_optional_exclusive(dev, "qspi-ocp");
	if (IS_ERR(rstc_ocp)) {
		ret = PTR_ERR(rstc_ocp);
		dev_err(dev, "Cannot get QSPI OCP reset.\n");
		goto probe_reset_failed;
	}

	if (of_device_is_compatible(pdev->dev.of_node, "starfive,jh7110-qspi")) {
		rstc_ref = devm_reset_control_get_optional_exclusive(dev, "rstc_ref");
		if (IS_ERR(rstc_ref)) {
			ret = PTR_ERR(rstc_ref);
			dev_err(dev, "Cannot get QSPI REF reset.\n");
			goto probe_reset_failed;
		}
		reset_control_assert(rstc_ref);
		reset_control_deassert(rstc_ref);
	}

	reset_control_assert(rstc);
	reset_control_deassert(rstc);

	reset_control_assert(rstc_ocp);
	reset_control_deassert(rstc_ocp);

	cqspi->master_ref_clk_hz = clk_get_rate(cqspi->clk);
	host->max_speed_hz = cqspi->master_ref_clk_hz;

	/* write completion is supported by default */
	cqspi->wr_completion = true;

	if (ddata) {
		if (ddata->quirks & CQSPI_NEEDS_WR_DELAY)
			cqspi->wr_delay = 50 * DIV_ROUND_UP(NSEC_PER_SEC,
							    cqspi->master_ref_clk_hz);
		if (ddata->hwcaps_mask & CQSPI_SUPPORTS_OCTAL)
			host->mode_bits |= SPI_RX_OCTAL | SPI_TX_OCTAL;
		if (ddata->hwcaps_mask & CQSPI_SUPPORTS_QUAD)
			host->mode_bits |= SPI_TX_QUAD;
		if (!(ddata->quirks & CQSPI_DISABLE_DAC_MODE)) {
			cqspi->use_direct_mode = true;
			cqspi->use_direct_mode_wr = true;
		}
		if (ddata->quirks & CQSPI_SUPPORT_EXTERNAL_DMA)
			cqspi->use_dma_read = true;
		if (ddata->quirks & CQSPI_NO_SUPPORT_WR_COMPLETION)
			cqspi->wr_completion = false;
		if (ddata->quirks & CQSPI_SLOW_SRAM)
			cqspi->slow_sram = true;
		if (ddata->quirks & CQSPI_NEEDS_APB_AHB_HAZARD_WAR)
			cqspi->apb_ahb_hazard = true;

		if (ddata->jh7110_clk_init) {
			ret = cqspi_jh7110_clk_init(pdev, cqspi);
			if (ret)
				goto probe_reset_failed;
		}
		if (ddata->quirks & CQSPI_DISABLE_STIG_MODE)
			cqspi->disable_stig_mode = true;

		if (ddata->quirks & CQSPI_DMA_SET_MASK) {
			ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
			if (ret)
				goto probe_reset_failed;
		}
	}

	refcount_set(&cqspi->refcount, 1);
	refcount_set(&cqspi->inflight_ops, 1);

	ret = devm_request_irq(dev, irq, cqspi_irq_handler, 0,
			       pdev->name, cqspi);
	if (ret) {
		dev_err(dev, "Cannot request IRQ.\n");
		goto probe_reset_failed;
	}

	cqspi_wait_idle(cqspi);
	cqspi_controller_enable(cqspi, 0);
	cqspi_controller_detect_fifo_depth(cqspi);
	cqspi_controller_init(cqspi);
	cqspi_controller_enable(cqspi, 1);
	cqspi->current_cs = -1;
	cqspi->sclk = 0;

	ret = cqspi_setup_flash(cqspi);
	if (ret) {
		dev_err(dev, "failed to setup flash parameters %d\n", ret);
		goto probe_setup_failed;
	}

	host->num_chipselect = cqspi->num_chipselect;

	if (ddata && (ddata->quirks & CQSPI_SUPPORT_DEVICE_RESET))
		cqspi_device_reset(cqspi);

	if (ddata && (ddata->quirks & CQSPI_SUPPORT_DEVICE_RESET))
		cqspi_device_reset(cqspi);

	if (cqspi->use_direct_mode) {
		ret = cqspi_request_mmap_dma(cqspi);
		if (ret == -EPROBE_DEFER)
			goto probe_setup_failed;
	}

	pm_runtime_enable(dev);

	pm_runtime_set_autosuspend_delay(dev, CQSPI_AUTOSUSPEND_TIMEOUT);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_get_noresume(dev);

	ret = spi_register_controller(host);
	if (ret) {
		dev_err(&pdev->dev, "failed to register SPI ctlr %d\n", ret);
		goto probe_setup_failed;
	}

	pm_runtime_put_autosuspend(dev);

	return 0;
probe_setup_failed:
	cqspi_controller_enable(cqspi, 0);
	pm_runtime_disable(dev);
probe_reset_failed:
	if (cqspi->is_jh7110)
		cqspi_jh7110_disable_clk(pdev, cqspi);
	clk_disable_unprepare(cqspi->clk);
probe_clk_failed:
	return ret;
}

static void cqspi_remove(struct platform_device *pdev)
{
	struct cqspi_st *cqspi = platform_get_drvdata(pdev);

	/* Reject new operations, then wait out any still in flight. */
	refcount_set(&cqspi->refcount, 0);

	if (!refcount_dec_and_test(&cqspi->inflight_ops))
		cqspi_wait_idle(cqspi);

	spi_unregister_controller(cqspi->host);
	cqspi_controller_enable(cqspi, 0);

	if (cqspi->rx_chan)
		dma_release_channel(cqspi->rx_chan);

	if (pm_runtime_get_sync(&pdev->dev) >= 0)
		clk_disable(cqspi->clk);

	if (cqspi->is_jh7110)
		cqspi_jh7110_disable_clk(pdev, cqspi);

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
}

static int cqspi_runtime_suspend(struct device *dev)
{
	struct cqspi_st *cqspi = dev_get_drvdata(dev);

	cqspi_controller_enable(cqspi, 0);
	clk_disable_unprepare(cqspi->clk);
	return 0;
}

static int cqspi_runtime_resume(struct device *dev)
{
	struct cqspi_st *cqspi = dev_get_drvdata(dev);

	clk_prepare_enable(cqspi->clk);
	cqspi_wait_idle(cqspi);
	cqspi_controller_enable(cqspi, 0);
	cqspi_controller_init(cqspi);
	cqspi_controller_enable(cqspi, 1);

	cqspi->current_cs = -1;
	cqspi->sclk = 0;
	return 0;
}

static int cqspi_suspend(struct device *dev)
{
	struct cqspi_st *cqspi = dev_get_drvdata(dev);
	int ret;

	ret = spi_controller_suspend(cqspi->host);
	if (ret)
		return ret;

	return pm_runtime_force_suspend(dev);
}

static int cqspi_resume(struct device *dev)
{
	struct cqspi_st *cqspi = dev_get_drvdata(dev);
	int ret;

	ret = pm_runtime_force_resume(dev);
	if (ret) {
		dev_err(dev, "pm_runtime_force_resume failed on resume\n");
		return ret;
	}

	return spi_controller_resume(cqspi->host);
}

static const struct dev_pm_ops cqspi_dev_pm_ops = {
	RUNTIME_PM_OPS(cqspi_runtime_suspend, cqspi_runtime_resume, NULL)
	SYSTEM_SLEEP_PM_OPS(cqspi_suspend, cqspi_resume)
};
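
/*
 * Per-compatible platform data: hwcaps_mask advertises extra bus widths
 * (octal/quad) and quirks ORs together the CQSPI_* workaround flags that
 * cqspi_probe() translates into the runtime knobs above. Each entry is
 * referenced by the matching compatible string in cqspi_dt_ids[] below.
 */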

static const struct cqspi_driver_platdata cdns_qspi = {
	.quirks = CQSPI_DISABLE_DAC_MODE,
};

static const struct cqspi_driver_platdata k2g_qspi = {
	.quirks = CQSPI_NEEDS_WR_DELAY,
};

static const struct cqspi_driver_platdata am654_ospi = {
	.hwcaps_mask = CQSPI_SUPPORTS_OCTAL | CQSPI_SUPPORTS_QUAD,
	.quirks = CQSPI_NEEDS_WR_DELAY,
};

static const struct cqspi_driver_platdata intel_lgm_qspi = {
	.quirks = CQSPI_DISABLE_DAC_MODE,
};

static const struct cqspi_driver_platdata socfpga_qspi = {
	.quirks = CQSPI_DISABLE_DAC_MODE
			| CQSPI_NO_SUPPORT_WR_COMPLETION
			| CQSPI_SLOW_SRAM
			| CQSPI_DISABLE_STIG_MODE,
};

static const struct cqspi_driver_platdata versal_ospi = {
	.hwcaps_mask = CQSPI_SUPPORTS_OCTAL,
	.quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_SUPPORT_EXTERNAL_DMA
			| CQSPI_DMA_SET_MASK,
	.indirect_read_dma = cqspi_versal_indirect_read_dma,
	.get_dma_status = cqspi_get_versal_dma_status,
};

static const struct cqspi_driver_platdata versal2_ospi = {
	.hwcaps_mask = CQSPI_SUPPORTS_OCTAL,
	.quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_SUPPORT_EXTERNAL_DMA
			| CQSPI_DMA_SET_MASK
			| CQSPI_SUPPORT_DEVICE_RESET,
	.indirect_read_dma = cqspi_versal_indirect_read_dma,
	.get_dma_status = cqspi_get_versal_dma_status,
};

static const struct cqspi_driver_platdata jh7110_qspi = {
	.quirks = CQSPI_DISABLE_DAC_MODE,
	.jh7110_clk_init = cqspi_jh7110_clk_init,
};

static const struct cqspi_driver_platdata pensando_cdns_qspi = {
	.quirks = CQSPI_NEEDS_APB_AHB_HAZARD_WAR | CQSPI_DISABLE_DAC_MODE,
};

static const struct cqspi_driver_platdata mobileye_eyeq5_ospi = {
	.hwcaps_mask = CQSPI_SUPPORTS_OCTAL,
	.quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_NO_SUPPORT_WR_COMPLETION |
		  CQSPI_RD_NO_IRQ,
};

static const struct of_device_id cqspi_dt_ids[] = {
	{
		.compatible = "cdns,qspi-nor",
		.data = &cdns_qspi,
	},
	{
		.compatible = "ti,k2g-qspi",
		.data = &k2g_qspi,
	},
	{
		.compatible = "ti,am654-ospi",
		.data = &am654_ospi,
	},
	{
		.compatible = "intel,lgm-qspi",
		.data = &intel_lgm_qspi,
	},
	{
		.compatible = "xlnx,versal-ospi-1.0",
		.data = &versal_ospi,
	},
	{
		.compatible = "intel,socfpga-qspi",
		.data = &socfpga_qspi,
	},
	{
		.compatible = "starfive,jh7110-qspi",
		.data = &jh7110_qspi,
	},
	{
		.compatible = "amd,pensando-elba-qspi",
		.data = &pensando_cdns_qspi,
	},
	{
		.compatible = "mobileye,eyeq5-ospi",
		.data = &mobileye_eyeq5_ospi,
	},
	{
		.compatible = "amd,versal2-ospi",
		.data = &versal2_ospi,
	},
	{ /* end of table */ }
};

MODULE_DEVICE_TABLE(of, cqspi_dt_ids);

static struct platform_driver cqspi_platform_driver = {
	.probe = cqspi_probe,
	.remove = cqspi_remove,
	.driver = {
		.name = CQSPI_NAME,
		.pm = pm_ptr(&cqspi_dev_pm_ops),
		.of_match_table = cqspi_dt_ids,
	},
};

module_platform_driver(cqspi_platform_driver);

MODULE_DESCRIPTION("Cadence QSPI Controller Driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" CQSPI_NAME);
MODULE_AUTHOR("Ley Foon Tan <lftan@altera.com>");
MODULE_AUTHOR("Graham Moore <grmoore@opensource.altera.com>");
MODULE_AUTHOR("Vadivel Murugan R <vadivel.muruganx.ramuthevar@intel.com>");
MODULE_AUTHOR("Vignesh Raghavendra <vigneshr@ti.com>");
MODULE_AUTHOR("Pratyush Yadav <p.yadav@ti.com>");