// SPDX-License-Identifier: GPL-2.0-only
//
// Driver for Cadence QSPI Controller
//
// Copyright Altera Corporation (C) 2012-2014. All rights reserved.
// Copyright Intel Corporation (C) 2019-2020. All rights reserved.
// Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/firmware/xlnx-zynqmp.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/sched.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/timer.h>

#define CQSPI_NAME			"cadence-qspi"
#define CQSPI_MAX_CHIPSELECT		4

static_assert(CQSPI_MAX_CHIPSELECT <= SPI_CS_CNT_MAX);

/* Quirks */
#define CQSPI_NEEDS_WR_DELAY		BIT(0)
#define CQSPI_DISABLE_DAC_MODE		BIT(1)
#define CQSPI_SUPPORT_EXTERNAL_DMA	BIT(2)
#define CQSPI_NO_SUPPORT_WR_COMPLETION	BIT(3)
#define CQSPI_SLOW_SRAM			BIT(4)
#define CQSPI_NEEDS_APB_AHB_HAZARD_WAR	BIT(5)
#define CQSPI_RD_NO_IRQ			BIT(6)
#define CQSPI_DMA_SET_MASK		BIT(7)
#define CQSPI_SUPPORT_DEVICE_RESET	BIT(8)
#define CQSPI_DISABLE_STIG_MODE		BIT(9)

/* Capabilities */
#define CQSPI_SUPPORTS_OCTAL		BIT(0)
#define CQSPI_SUPPORTS_QUAD		BIT(1)

#define CQSPI_OP_WIDTH(part) ((part).nbytes ? ilog2((part).buswidth) : 0)

enum {
	CLK_QSPI_APB = 0,
	CLK_QSPI_AHB,
	CLK_QSPI_NUM,
};

struct cqspi_st;

struct cqspi_flash_pdata {
	struct cqspi_st	*cqspi;
	u32		clk_rate;
	u32		read_delay;
	u32		tshsl_ns;
	u32		tsd2d_ns;
	u32		tchsh_ns;
	u32		tslch_ns;
	u8		cs;
};

struct cqspi_st {
	struct platform_device	*pdev;
	struct spi_controller	*host;
	struct clk		*clk;
	struct clk		*clks[CLK_QSPI_NUM];
	unsigned int		sclk;

	void __iomem		*iobase;
	void __iomem		*ahb_base;
	resource_size_t		ahb_size;
	struct completion	transfer_complete;

	struct dma_chan		*rx_chan;
	struct completion	rx_dma_complete;
	dma_addr_t		mmap_phys_base;

	int			current_cs;
	unsigned long		master_ref_clk_hz;
	bool			is_decoded_cs;
	u32			fifo_depth;
	u32			fifo_width;
	u32			num_chipselect;
	bool			rclk_en;
	u32			trigger_address;
	u32			wr_delay;
	bool			use_direct_mode;
	bool			use_direct_mode_wr;
	struct cqspi_flash_pdata f_pdata[CQSPI_MAX_CHIPSELECT];
	bool			use_dma_read;
	u32			pd_dev_id;
	bool			wr_completion;
	bool			slow_sram;
	bool			apb_ahb_hazard;

	bool			is_jh7110; /* Flag for StarFive JH7110 SoC */
	bool			disable_stig_mode;

	const struct cqspi_driver_platdata *ddata;
};

struct cqspi_driver_platdata {
	u32 hwcaps_mask;
	u16 quirks;
	int (*indirect_read_dma)(struct cqspi_flash_pdata *f_pdata,
				 u_char *rxbuf, loff_t from_addr, size_t n_rx);
	u32 (*get_dma_status)(struct cqspi_st *cqspi);
	int (*jh7110_clk_init)(struct platform_device *pdev,
			       struct cqspi_st *cqspi);
};

/* Operation timeout value */
#define CQSPI_TIMEOUT_MS		500
#define CQSPI_READ_TIMEOUT_MS		10
#define CQSPI_BUSYWAIT_TIMEOUT_US	500

/* Runtime_pm autosuspend delay */
#define CQSPI_AUTOSUSPEND_TIMEOUT	2000

#define CQSPI_DUMMY_CLKS_PER_BYTE	8
#define CQSPI_DUMMY_BYTES_MAX		4
#define CQSPI_DUMMY_CLKS_MAX		31

#define CQSPI_STIG_DATA_LEN_MAX		8

/* Register map */
#define CQSPI_REG_CONFIG			0x00
#define CQSPI_REG_CONFIG_ENABLE_MASK		BIT(0)
#define CQSPI_REG_CONFIG_ENB_DIR_ACC_CTRL	BIT(7)
#define CQSPI_REG_CONFIG_DECODE_MASK		BIT(9)
#define CQSPI_REG_CONFIG_CHIPSELECT_LSB		10
#define CQSPI_REG_CONFIG_DMA_MASK		BIT(15)
#define CQSPI_REG_CONFIG_BAUD_LSB		19
#define CQSPI_REG_CONFIG_DTR_PROTO		BIT(24)
#define CQSPI_REG_CONFIG_DUAL_OPCODE		BIT(30)
#define CQSPI_REG_CONFIG_IDLE_LSB		31
#define CQSPI_REG_CONFIG_CHIPSELECT_MASK	0xF
#define CQSPI_REG_CONFIG_BAUD_MASK		0xF
#define CQSPI_REG_CONFIG_RESET_PIN_FLD_MASK	BIT(5)
#define CQSPI_REG_CONFIG_RESET_CFG_FLD_MASK	BIT(6)

#define CQSPI_REG_RD_INSTR			0x04
#define CQSPI_REG_RD_INSTR_OPCODE_LSB		0
#define CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB	8
#define CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB	12
#define CQSPI_REG_RD_INSTR_TYPE_DATA_LSB	16
#define CQSPI_REG_RD_INSTR_MODE_EN_LSB		20
#define CQSPI_REG_RD_INSTR_DUMMY_LSB		24
#define CQSPI_REG_RD_INSTR_TYPE_INSTR_MASK	0x3
#define CQSPI_REG_RD_INSTR_TYPE_ADDR_MASK	0x3
#define CQSPI_REG_RD_INSTR_TYPE_DATA_MASK	0x3
#define CQSPI_REG_RD_INSTR_DUMMY_MASK		0x1F

#define CQSPI_REG_WR_INSTR			0x08
#define CQSPI_REG_WR_INSTR_OPCODE_LSB		0
#define CQSPI_REG_WR_INSTR_TYPE_ADDR_LSB	12
#define CQSPI_REG_WR_INSTR_TYPE_DATA_LSB	16

#define CQSPI_REG_DELAY				0x0C
#define CQSPI_REG_DELAY_TSLCH_LSB		0
#define CQSPI_REG_DELAY_TCHSH_LSB		8
#define CQSPI_REG_DELAY_TSD2D_LSB		16
#define CQSPI_REG_DELAY_TSHSL_LSB		24
#define CQSPI_REG_DELAY_TSLCH_MASK		0xFF
#define CQSPI_REG_DELAY_TCHSH_MASK		0xFF
#define CQSPI_REG_DELAY_TSD2D_MASK		0xFF
#define CQSPI_REG_DELAY_TSHSL_MASK		0xFF

#define CQSPI_REG_READCAPTURE			0x10
#define CQSPI_REG_READCAPTURE_BYPASS_LSB	0
#define CQSPI_REG_READCAPTURE_DELAY_LSB		1
#define CQSPI_REG_READCAPTURE_DELAY_MASK	0xF

#define CQSPI_REG_SIZE				0x14
#define CQSPI_REG_SIZE_ADDRESS_LSB		0
#define CQSPI_REG_SIZE_PAGE_LSB			4
#define CQSPI_REG_SIZE_BLOCK_LSB		16
#define CQSPI_REG_SIZE_ADDRESS_MASK		0xF
#define CQSPI_REG_SIZE_PAGE_MASK		0xFFF
#define CQSPI_REG_SIZE_BLOCK_MASK		0x3F

#define CQSPI_REG_SRAMPARTITION			0x18
#define CQSPI_REG_INDIRECTTRIGGER		0x1C

#define CQSPI_REG_DMA				0x20
#define CQSPI_REG_DMA_SINGLE_LSB		0
#define CQSPI_REG_DMA_BURST_LSB			8
#define CQSPI_REG_DMA_SINGLE_MASK		0xFF
#define CQSPI_REG_DMA_BURST_MASK		0xFF

#define CQSPI_REG_REMAP				0x24
#define CQSPI_REG_MODE_BIT			0x28

#define CQSPI_REG_SDRAMLEVEL			0x2C
#define CQSPI_REG_SDRAMLEVEL_RD_LSB		0
#define CQSPI_REG_SDRAMLEVEL_WR_LSB		16
#define CQSPI_REG_SDRAMLEVEL_RD_MASK		0xFFFF
#define CQSPI_REG_SDRAMLEVEL_WR_MASK		0xFFFF

#define CQSPI_REG_WR_COMPLETION_CTRL		0x38
#define CQSPI_REG_WR_DISABLE_AUTO_POLL		BIT(14)

#define CQSPI_REG_IRQSTATUS			0x40
#define CQSPI_REG_IRQMASK			0x44

#define CQSPI_REG_INDIRECTRD			0x60
#define CQSPI_REG_INDIRECTRD_START_MASK		BIT(0)
#define CQSPI_REG_INDIRECTRD_CANCEL_MASK	BIT(1)
#define CQSPI_REG_INDIRECTRD_DONE_MASK		BIT(5)

#define CQSPI_REG_INDIRECTRDWATERMARK		0x64
#define CQSPI_REG_INDIRECTRDSTARTADDR		0x68
#define CQSPI_REG_INDIRECTRDBYTES		0x6C

#define CQSPI_REG_CMDCTRL			0x90
#define CQSPI_REG_CMDCTRL_EXECUTE_MASK		BIT(0)
#define CQSPI_REG_CMDCTRL_INPROGRESS_MASK	BIT(1)
#define CQSPI_REG_CMDCTRL_DUMMY_LSB		7
#define CQSPI_REG_CMDCTRL_WR_BYTES_LSB		12
#define CQSPI_REG_CMDCTRL_WR_EN_LSB		15
#define CQSPI_REG_CMDCTRL_ADD_BYTES_LSB		16
#define CQSPI_REG_CMDCTRL_ADDR_EN_LSB		19
#define CQSPI_REG_CMDCTRL_RD_BYTES_LSB		20
#define CQSPI_REG_CMDCTRL_RD_EN_LSB		23
#define CQSPI_REG_CMDCTRL_OPCODE_LSB		24
#define CQSPI_REG_CMDCTRL_WR_BYTES_MASK		0x7
#define CQSPI_REG_CMDCTRL_ADD_BYTES_MASK	0x3
#define CQSPI_REG_CMDCTRL_RD_BYTES_MASK		0x7
#define CQSPI_REG_CMDCTRL_DUMMY_MASK		0x1F

#define CQSPI_REG_INDIRECTWR			0x70
#define CQSPI_REG_INDIRECTWR_START_MASK		BIT(0)
#define CQSPI_REG_INDIRECTWR_CANCEL_MASK	BIT(1)
#define CQSPI_REG_INDIRECTWR_DONE_MASK		BIT(5)

#define CQSPI_REG_INDIRECTWRWATERMARK		0x74
#define CQSPI_REG_INDIRECTWRSTARTADDR		0x78
#define CQSPI_REG_INDIRECTWRBYTES		0x7C

#define CQSPI_REG_INDTRIG_ADDRRANGE		0x80

#define CQSPI_REG_CMDADDRESS			0x94
#define CQSPI_REG_CMDREADDATALOWER		0xA0
#define CQSPI_REG_CMDREADDATAUPPER		0xA4
#define CQSPI_REG_CMDWRITEDATALOWER		0xA8
#define CQSPI_REG_CMDWRITEDATAUPPER		0xAC

#define CQSPI_REG_POLLING_STATUS		0xB0
#define CQSPI_REG_POLLING_STATUS_DUMMY_LSB	16

#define CQSPI_REG_OP_EXT_LOWER			0xE0
#define CQSPI_REG_OP_EXT_READ_LSB		24
#define CQSPI_REG_OP_EXT_WRITE_LSB		16
#define CQSPI_REG_OP_EXT_STIG_LSB		0

#define CQSPI_REG_VERSAL_DMA_SRC_ADDR		0x1000

#define CQSPI_REG_VERSAL_DMA_DST_ADDR		0x1800
#define CQSPI_REG_VERSAL_DMA_DST_SIZE		0x1804

#define CQSPI_REG_VERSAL_DMA_DST_CTRL		0x180C

#define CQSPI_REG_VERSAL_DMA_DST_I_STS		0x1814
#define CQSPI_REG_VERSAL_DMA_DST_I_EN		0x1818
#define CQSPI_REG_VERSAL_DMA_DST_I_DIS		0x181C
#define CQSPI_REG_VERSAL_DMA_DST_DONE_MASK	BIT(1)

#define CQSPI_REG_VERSAL_DMA_DST_ADDR_MSB	0x1828

#define CQSPI_REG_VERSAL_DMA_DST_CTRL_VAL	0xF43FFA00
#define CQSPI_REG_VERSAL_ADDRRANGE_WIDTH_VAL	0x6

/* Interrupt status bits */
#define CQSPI_REG_IRQ_MODE_ERR			BIT(0)
#define CQSPI_REG_IRQ_UNDERFLOW			BIT(1)
#define CQSPI_REG_IRQ_IND_COMP			BIT(2)
#define CQSPI_REG_IRQ_IND_RD_REJECT		BIT(3)
#define CQSPI_REG_IRQ_WR_PROTECTED_ERR		BIT(4)
#define CQSPI_REG_IRQ_ILLEGAL_AHB_ERR		BIT(5)
#define CQSPI_REG_IRQ_WATERMARK			BIT(6)
#define CQSPI_REG_IRQ_IND_SRAM_FULL		BIT(12)

#define CQSPI_IRQ_MASK_RD		(CQSPI_REG_IRQ_WATERMARK	| \
					 CQSPI_REG_IRQ_IND_SRAM_FULL	| \
					 CQSPI_REG_IRQ_IND_COMP)

#define CQSPI_IRQ_MASK_WR		(CQSPI_REG_IRQ_IND_COMP		| \
					 CQSPI_REG_IRQ_WATERMARK	| \
					 CQSPI_REG_IRQ_UNDERFLOW)

#define CQSPI_IRQ_STATUS_MASK		0x1FFFF
#define CQSPI_DMA_UNALIGN		0x3

#define CQSPI_REG_VERSAL_DMA_VAL	0x602

static int cqspi_wait_for_bit(const struct cqspi_driver_platdata *ddata,
			      void __iomem *reg, const u32 mask, bool clr,
			      bool busywait)
{
	u64 timeout_us = CQSPI_TIMEOUT_MS * USEC_PER_MSEC;
	u32 val;

	if (busywait) {
		int ret = readl_relaxed_poll_timeout(reg, val,
						     (((clr ? ~val : val) & mask) == mask),
						     0, CQSPI_BUSYWAIT_TIMEOUT_US);

		if (ret != -ETIMEDOUT)
			return ret;

		timeout_us -= CQSPI_BUSYWAIT_TIMEOUT_US;
	}

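	/*
	 * Whatever time is left after the optional busy-wait above is spent
	 * in a sleeping poll with a 10 us interval, so slow completions do
	 * not burn CPU cycles.
	 */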
	return readl_relaxed_poll_timeout(reg, val,
					  (((clr ? ~val : val) & mask) == mask),
					  10, timeout_us);
}

static bool cqspi_is_idle(struct cqspi_st *cqspi)
{
	u32 reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);

	return reg & (1UL << CQSPI_REG_CONFIG_IDLE_LSB);
}

static u32 cqspi_get_rd_sram_level(struct cqspi_st *cqspi)
{
	u32 reg = readl(cqspi->iobase + CQSPI_REG_SDRAMLEVEL);

	reg >>= CQSPI_REG_SDRAMLEVEL_RD_LSB;
	return reg & CQSPI_REG_SDRAMLEVEL_RD_MASK;
}

static u32 cqspi_get_versal_dma_status(struct cqspi_st *cqspi)
{
	u32 dma_status;

	dma_status = readl(cqspi->iobase +
			   CQSPI_REG_VERSAL_DMA_DST_I_STS);
	writel(dma_status, cqspi->iobase +
	       CQSPI_REG_VERSAL_DMA_DST_I_STS);

	return dma_status & CQSPI_REG_VERSAL_DMA_DST_DONE_MASK;
}

static irqreturn_t cqspi_irq_handler(int this_irq, void *dev)
{
	struct cqspi_st *cqspi = dev;
	const struct cqspi_driver_platdata *ddata = cqspi->ddata;
	unsigned int irq_status;

	/* Read interrupt status */
	irq_status = readl(cqspi->iobase + CQSPI_REG_IRQSTATUS);

	/* Clear interrupt */
	writel(irq_status, cqspi->iobase + CQSPI_REG_IRQSTATUS);

	if (cqspi->use_dma_read && ddata && ddata->get_dma_status) {
		if (ddata->get_dma_status(cqspi)) {
			complete(&cqspi->transfer_complete);
			return IRQ_HANDLED;
		}
	}

	else if (!cqspi->slow_sram)
		irq_status &= CQSPI_IRQ_MASK_RD | CQSPI_IRQ_MASK_WR;
	else
		irq_status &= CQSPI_REG_IRQ_WATERMARK | CQSPI_IRQ_MASK_WR;

	if (irq_status)
		complete(&cqspi->transfer_complete);

	return IRQ_HANDLED;
}

static unsigned int cqspi_calc_rdreg(const struct spi_mem_op *op)
{
	u32 rdreg = 0;

	rdreg |= CQSPI_OP_WIDTH(op->cmd) << CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB;
	rdreg |= CQSPI_OP_WIDTH(op->addr) << CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB;
	rdreg |= CQSPI_OP_WIDTH(op->data) << CQSPI_REG_RD_INSTR_TYPE_DATA_LSB;

	return rdreg;
}

static unsigned int cqspi_calc_dummy(const struct spi_mem_op *op)
{
	unsigned int dummy_clk;

	if (!op->dummy.nbytes)
		return 0;

	dummy_clk = op->dummy.nbytes * (8 / op->dummy.buswidth);
	if (op->cmd.dtr)
		dummy_clk /= 2;

	return dummy_clk;
}

static int cqspi_wait_idle(struct cqspi_st *cqspi)
{
	const unsigned int poll_idle_retry = 3;
	unsigned int count = 0;
	unsigned long timeout;

	timeout = jiffies + msecs_to_jiffies(CQSPI_TIMEOUT_MS);
	while (1) {
		/*
		 * Read a few times in succession to ensure the controller
		 * is indeed idle, that is, the bit does not transition
		 * low again.
		 */
		if (cqspi_is_idle(cqspi))
			count++;
		else
			count = 0;

		if (count >= poll_idle_retry)
			return 0;

		if (time_after(jiffies, timeout)) {
			/* Timeout, in busy mode. */
			dev_err(&cqspi->pdev->dev,
				"QSPI is still busy after %dms timeout.\n",
				CQSPI_TIMEOUT_MS);
			return -ETIMEDOUT;
		}

		cpu_relax();
	}
}

static int cqspi_exec_flash_cmd(struct cqspi_st *cqspi, unsigned int reg)
{
	void __iomem *reg_base = cqspi->iobase;
	int ret;

	/* Write the CMDCTRL without start execution. */
	writel(reg, reg_base + CQSPI_REG_CMDCTRL);
	/* Start execute */
	reg |= CQSPI_REG_CMDCTRL_EXECUTE_MASK;
	writel(reg, reg_base + CQSPI_REG_CMDCTRL);
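
	/*
	 * The controller keeps CQSPI_REG_CMDCTRL_INPROGRESS_MASK set while
	 * the STIG command is on the bus and clears it on completion, which
	 * is what the wait below polls for.
	 */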
	/* Polling for completion. */
	ret = cqspi_wait_for_bit(cqspi->ddata, reg_base + CQSPI_REG_CMDCTRL,
				 CQSPI_REG_CMDCTRL_INPROGRESS_MASK, 1, true);
	if (ret) {
		dev_err(&cqspi->pdev->dev,
			"Flash command execution timed out.\n");
		return ret;
	}

	/* Polling QSPI idle status. */
	return cqspi_wait_idle(cqspi);
}

static int cqspi_setup_opcode_ext(struct cqspi_flash_pdata *f_pdata,
				  const struct spi_mem_op *op,
				  unsigned int shift)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	unsigned int reg;
	u8 ext;

	if (op->cmd.nbytes != 2)
		return -EINVAL;

	/* Opcode extension is the LSB. */
	ext = op->cmd.opcode & 0xff;

	reg = readl(reg_base + CQSPI_REG_OP_EXT_LOWER);
	reg &= ~(0xff << shift);
	reg |= ext << shift;
	writel(reg, reg_base + CQSPI_REG_OP_EXT_LOWER);

	return 0;
}

static int cqspi_enable_dtr(struct cqspi_flash_pdata *f_pdata,
			    const struct spi_mem_op *op, unsigned int shift)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	unsigned int reg;
	int ret;

	reg = readl(reg_base + CQSPI_REG_CONFIG);

	/*
	 * We enable dual byte opcode here. The callers have to set up the
	 * extension opcode based on which type of operation it is.
	 */
	if (op->cmd.dtr) {
		reg |= CQSPI_REG_CONFIG_DTR_PROTO;
		reg |= CQSPI_REG_CONFIG_DUAL_OPCODE;

		/* Set up command opcode extension. */
		ret = cqspi_setup_opcode_ext(f_pdata, op, shift);
		if (ret)
			return ret;
	} else {
		unsigned int mask = CQSPI_REG_CONFIG_DTR_PROTO | CQSPI_REG_CONFIG_DUAL_OPCODE;
		/* Shortcut if DTR is already disabled. */
		if ((reg & mask) == 0)
			return 0;
		reg &= ~mask;
	}

	writel(reg, reg_base + CQSPI_REG_CONFIG);

	return cqspi_wait_idle(cqspi);
}

static int cqspi_command_read(struct cqspi_flash_pdata *f_pdata,
			      const struct spi_mem_op *op)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	u8 *rxbuf = op->data.buf.in;
	u8 opcode;
	size_t n_rx = op->data.nbytes;
	unsigned int rdreg;
	unsigned int reg;
	unsigned int dummy_clk;
	size_t read_len;
	int status;

	status = cqspi_enable_dtr(f_pdata, op, CQSPI_REG_OP_EXT_STIG_LSB);
	if (status)
		return status;

	if (!n_rx || n_rx > CQSPI_STIG_DATA_LEN_MAX || !rxbuf) {
		dev_err(&cqspi->pdev->dev,
			"Invalid input argument, len %zu rxbuf 0x%p\n",
			n_rx, rxbuf);
		return -EINVAL;
	}

	if (op->cmd.dtr)
		opcode = op->cmd.opcode >> 8;
	else
		opcode = op->cmd.opcode;

	reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;

	rdreg = cqspi_calc_rdreg(op);
	writel(rdreg, reg_base + CQSPI_REG_RD_INSTR);

	dummy_clk = cqspi_calc_dummy(op);
	if (dummy_clk > CQSPI_DUMMY_CLKS_MAX)
		return -EOPNOTSUPP;

	if (dummy_clk)
		reg |= (dummy_clk & CQSPI_REG_CMDCTRL_DUMMY_MASK)
			<< CQSPI_REG_CMDCTRL_DUMMY_LSB;

	reg |= (0x1 << CQSPI_REG_CMDCTRL_RD_EN_LSB);
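
	/*
	 * The RD_BYTES field holds the transfer length minus one, so a STIG
	 * read returns at most CQSPI_STIG_DATA_LEN_MAX (8) bytes via the
	 * CMDREADDATALOWER/UPPER registers.
	 */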
	/* 0 means 1 byte. */
	reg |= (((n_rx - 1) & CQSPI_REG_CMDCTRL_RD_BYTES_MASK)
		<< CQSPI_REG_CMDCTRL_RD_BYTES_LSB);

	/* setup ADDR BIT field */
	if (op->addr.nbytes) {
		reg |= (0x1 << CQSPI_REG_CMDCTRL_ADDR_EN_LSB);
		reg |= ((op->addr.nbytes - 1) &
			CQSPI_REG_CMDCTRL_ADD_BYTES_MASK)
			<< CQSPI_REG_CMDCTRL_ADD_BYTES_LSB;

		writel(op->addr.val, reg_base + CQSPI_REG_CMDADDRESS);
	}

	status = cqspi_exec_flash_cmd(cqspi, reg);
	if (status)
		return status;

	reg = readl(reg_base + CQSPI_REG_CMDREADDATALOWER);

	/* Put the read value into rx_buf */
	read_len = (n_rx > 4) ? 4 : n_rx;
	memcpy(rxbuf, &reg, read_len);
	rxbuf += read_len;

	if (n_rx > 4) {
		reg = readl(reg_base + CQSPI_REG_CMDREADDATAUPPER);

		read_len = n_rx - read_len;
		memcpy(rxbuf, &reg, read_len);
	}

	/* Reset CMD_CTRL Reg once command read completes */
	writel(0, reg_base + CQSPI_REG_CMDCTRL);

	return 0;
}

static int cqspi_command_write(struct cqspi_flash_pdata *f_pdata,
			       const struct spi_mem_op *op)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	u8 opcode;
	const u8 *txbuf = op->data.buf.out;
	size_t n_tx = op->data.nbytes;
	unsigned int reg;
	unsigned int data;
	size_t write_len;
	int ret;

	ret = cqspi_enable_dtr(f_pdata, op, CQSPI_REG_OP_EXT_STIG_LSB);
	if (ret)
		return ret;

	if (n_tx > CQSPI_STIG_DATA_LEN_MAX || (n_tx && !txbuf)) {
		dev_err(&cqspi->pdev->dev,
			"Invalid input argument, cmdlen %zu txbuf 0x%p\n",
			n_tx, txbuf);
		return -EINVAL;
	}

	reg = cqspi_calc_rdreg(op);
	writel(reg, reg_base + CQSPI_REG_RD_INSTR);

	if (op->cmd.dtr)
		opcode = op->cmd.opcode >> 8;
	else
		opcode = op->cmd.opcode;

	reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;

	if (op->addr.nbytes) {
		reg |= (0x1 << CQSPI_REG_CMDCTRL_ADDR_EN_LSB);
		reg |= ((op->addr.nbytes - 1) &
			CQSPI_REG_CMDCTRL_ADD_BYTES_MASK)
			<< CQSPI_REG_CMDCTRL_ADD_BYTES_LSB;

		writel(op->addr.val, reg_base + CQSPI_REG_CMDADDRESS);
	}

	if (n_tx) {
		reg |= (0x1 << CQSPI_REG_CMDCTRL_WR_EN_LSB);
		reg |= ((n_tx - 1) & CQSPI_REG_CMDCTRL_WR_BYTES_MASK)
			<< CQSPI_REG_CMDCTRL_WR_BYTES_LSB;
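		/*
		 * Stage up to eight data bytes in the two 32-bit
		 * CMDWRITEDATA registers, lower word first.
		 */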
		data = 0;
		write_len = (n_tx > 4) ? 4 : n_tx;
		memcpy(&data, txbuf, write_len);
		txbuf += write_len;
		writel(data, reg_base + CQSPI_REG_CMDWRITEDATALOWER);

		if (n_tx > 4) {
			data = 0;
			write_len = n_tx - 4;
			memcpy(&data, txbuf, write_len);
			writel(data, reg_base + CQSPI_REG_CMDWRITEDATAUPPER);
		}
	}

	ret = cqspi_exec_flash_cmd(cqspi, reg);

	/* Reset CMD_CTRL Reg once command write completes */
	writel(0, reg_base + CQSPI_REG_CMDCTRL);

	return ret;
}

static int cqspi_read_setup(struct cqspi_flash_pdata *f_pdata,
			    const struct spi_mem_op *op)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	unsigned int dummy_clk = 0;
	unsigned int reg;
	int ret;
	u8 opcode;

	ret = cqspi_enable_dtr(f_pdata, op, CQSPI_REG_OP_EXT_READ_LSB);
	if (ret)
		return ret;

	if (op->cmd.dtr)
		opcode = op->cmd.opcode >> 8;
	else
		opcode = op->cmd.opcode;

	reg = opcode << CQSPI_REG_RD_INSTR_OPCODE_LSB;
	reg |= cqspi_calc_rdreg(op);

	/* Setup dummy clock cycles */
	dummy_clk = cqspi_calc_dummy(op);

	if (dummy_clk > CQSPI_DUMMY_CLKS_MAX)
		return -EOPNOTSUPP;

	if (dummy_clk)
		reg |= (dummy_clk & CQSPI_REG_RD_INSTR_DUMMY_MASK)
		       << CQSPI_REG_RD_INSTR_DUMMY_LSB;

	writel(reg, reg_base + CQSPI_REG_RD_INSTR);

	/* Set address width */
	reg = readl(reg_base + CQSPI_REG_SIZE);
	reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
	reg |= (op->addr.nbytes - 1);
	writel(reg, reg_base + CQSPI_REG_SIZE);
	return 0;
}

static int cqspi_indirect_read_execute(struct cqspi_flash_pdata *f_pdata,
				       u8 *rxbuf, loff_t from_addr,
				       const size_t n_rx)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	bool use_irq = !(cqspi->ddata && cqspi->ddata->quirks & CQSPI_RD_NO_IRQ);
	struct device *dev = &cqspi->pdev->dev;
	void __iomem *reg_base = cqspi->iobase;
	void __iomem *ahb_base = cqspi->ahb_base;
	unsigned int remaining = n_rx;
	unsigned int mod_bytes = n_rx % 4;
	unsigned int bytes_to_read = 0;
	u8 *rxbuf_end = rxbuf + n_rx;
	int ret = 0;

	writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR);
	writel(remaining, reg_base + CQSPI_REG_INDIRECTRDBYTES);

	/* Clear all interrupts. */
	writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);

	/*
	 * On the SoCFPGA platform, reading the SRAM is slow due to a
	 * hardware limitation and causes an interrupt storm on the CPU,
	 * so enable only the watermark interrupt here and disable all the
	 * read interrupts later, as we want to run the "bytes to read"
	 * loop with all read interrupts disabled for maximum performance.
	 */

	if (use_irq && cqspi->slow_sram)
		writel(CQSPI_REG_IRQ_WATERMARK, reg_base + CQSPI_REG_IRQMASK);
	else if (use_irq)
		writel(CQSPI_IRQ_MASK_RD, reg_base + CQSPI_REG_IRQMASK);
	else
		writel(0, reg_base + CQSPI_REG_IRQMASK);

	reinit_completion(&cqspi->transfer_complete);
	writel(CQSPI_REG_INDIRECTRD_START_MASK,
	       reg_base + CQSPI_REG_INDIRECTRD);

	while (remaining > 0) {
		if (use_irq &&
		    !wait_for_completion_timeout(&cqspi->transfer_complete,
						 msecs_to_jiffies(CQSPI_READ_TIMEOUT_MS)))
			ret = -ETIMEDOUT;

		/*
		 * Disable all read interrupts until
		 * we are out of "bytes to read"
		 */
		if (cqspi->slow_sram)
			writel(0x0, reg_base + CQSPI_REG_IRQMASK);

		bytes_to_read = cqspi_get_rd_sram_level(cqspi);

		if (ret && bytes_to_read == 0) {
			dev_err(dev, "Indirect read timeout, no bytes\n");
			goto failrd;
		}

		while (bytes_to_read != 0) {
			unsigned int word_remain = round_down(remaining, 4);

			bytes_to_read *= cqspi->fifo_width;
			bytes_to_read = bytes_to_read > remaining ?
					remaining : bytes_to_read;
			bytes_to_read = round_down(bytes_to_read, 4);
			/* Read 4 byte word chunks then single bytes */
			if (bytes_to_read) {
				ioread32_rep(ahb_base, rxbuf,
					     (bytes_to_read / 4));
			} else if (!word_remain && mod_bytes) {
				unsigned int temp = ioread32(ahb_base);

				bytes_to_read = mod_bytes;
				memcpy(rxbuf, &temp, min((unsigned int)
							 (rxbuf_end - rxbuf),
							 bytes_to_read));
			}
			rxbuf += bytes_to_read;
			remaining -= bytes_to_read;
			bytes_to_read = cqspi_get_rd_sram_level(cqspi);
		}

		if (use_irq && remaining > 0) {
			reinit_completion(&cqspi->transfer_complete);
			if (cqspi->slow_sram)
				writel(CQSPI_REG_IRQ_WATERMARK, reg_base + CQSPI_REG_IRQMASK);
		}
	}

	/* Check indirect done status */
	ret = cqspi_wait_for_bit(cqspi->ddata, reg_base + CQSPI_REG_INDIRECTRD,
				 CQSPI_REG_INDIRECTRD_DONE_MASK, 0, true);
	if (ret) {
		dev_err(dev, "Indirect read completion error (%i)\n", ret);
		goto failrd;
	}

	/* Disable interrupt */
	writel(0, reg_base + CQSPI_REG_IRQMASK);

	/* Clear indirect completion status */
	writel(CQSPI_REG_INDIRECTRD_DONE_MASK, reg_base + CQSPI_REG_INDIRECTRD);

	return 0;

failrd:
	/* Disable interrupt */
	writel(0, reg_base + CQSPI_REG_IRQMASK);

	/* Cancel the indirect read */
	writel(CQSPI_REG_INDIRECTRD_CANCEL_MASK,
	       reg_base + CQSPI_REG_INDIRECTRD);
	return ret;
}

static void cqspi_device_reset(struct cqspi_st *cqspi)
{
	u32 reg;

	reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
	reg |= CQSPI_REG_CONFIG_RESET_CFG_FLD_MASK;
	writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);
	/*
	 * NOTE: Delay timing implementation is derived from
	 * spi_nor_hw_reset()
	 */
	writel(reg & ~CQSPI_REG_CONFIG_RESET_PIN_FLD_MASK, cqspi->iobase + CQSPI_REG_CONFIG);
	usleep_range(1, 5);
	writel(reg | CQSPI_REG_CONFIG_RESET_PIN_FLD_MASK, cqspi->iobase + CQSPI_REG_CONFIG);
	usleep_range(100, 150);
	writel(reg & ~CQSPI_REG_CONFIG_RESET_PIN_FLD_MASK, cqspi->iobase + CQSPI_REG_CONFIG);
	usleep_range(1000, 1200);
}

static void cqspi_controller_enable(struct cqspi_st *cqspi, bool enable)
{
	void __iomem *reg_base = cqspi->iobase;
	unsigned int reg;

	reg = readl(reg_base + CQSPI_REG_CONFIG);

	if (enable)
		reg |= CQSPI_REG_CONFIG_ENABLE_MASK;
	else
		reg &= ~CQSPI_REG_CONFIG_ENABLE_MASK;

	writel(reg, reg_base + CQSPI_REG_CONFIG);
}

static int cqspi_versal_indirect_read_dma(struct cqspi_flash_pdata *f_pdata,
					  u_char *rxbuf, loff_t from_addr,
					  size_t n_rx)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	struct device *dev = &cqspi->pdev->dev;
	void __iomem *reg_base = cqspi->iobase;
	u32 reg, bytes_to_dma;
	loff_t addr = from_addr;
	void *buf = rxbuf;
	dma_addr_t dma_addr;
	u8 bytes_rem;
	int ret = 0;

	bytes_rem = n_rx % 4;
	bytes_to_dma = (n_rx - bytes_rem);

	if (!bytes_to_dma)
		goto nondmard;

	ret = zynqmp_pm_ospi_mux_select(cqspi->pd_dev_id, PM_OSPI_MUX_SEL_DMA);
	if (ret)
		return ret;

	cqspi_controller_enable(cqspi, 0);

	reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
	reg |= CQSPI_REG_CONFIG_DMA_MASK;
	writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);

	cqspi_controller_enable(cqspi, 1);

	dma_addr = dma_map_single(dev, rxbuf, bytes_to_dma, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		dev_err(dev, "dma mapping failed\n");
		return -ENOMEM;
	}

	writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR);
	writel(bytes_to_dma, reg_base + CQSPI_REG_INDIRECTRDBYTES);
	writel(CQSPI_REG_VERSAL_ADDRRANGE_WIDTH_VAL,
	       reg_base + CQSPI_REG_INDTRIG_ADDRRANGE);

	/* Clear all interrupts. */
	writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);

	/* Enable DMA done interrupt */
	writel(CQSPI_REG_VERSAL_DMA_DST_DONE_MASK,
	       reg_base + CQSPI_REG_VERSAL_DMA_DST_I_EN);

	/* Default DMA periph configuration */
	writel(CQSPI_REG_VERSAL_DMA_VAL, reg_base + CQSPI_REG_DMA);

	/* Configure DMA Dst address */
	writel(lower_32_bits(dma_addr),
	       reg_base + CQSPI_REG_VERSAL_DMA_DST_ADDR);
	writel(upper_32_bits(dma_addr),
	       reg_base + CQSPI_REG_VERSAL_DMA_DST_ADDR_MSB);

	/* Configure DMA Src address */
	writel(cqspi->trigger_address, reg_base +
	       CQSPI_REG_VERSAL_DMA_SRC_ADDR);

	/* Set DMA destination size */
	writel(bytes_to_dma, reg_base + CQSPI_REG_VERSAL_DMA_DST_SIZE);

	/* Set DMA destination control */
	writel(CQSPI_REG_VERSAL_DMA_DST_CTRL_VAL,
	       reg_base + CQSPI_REG_VERSAL_DMA_DST_CTRL);

	writel(CQSPI_REG_INDIRECTRD_START_MASK,
	       reg_base + CQSPI_REG_INDIRECTRD);

	reinit_completion(&cqspi->transfer_complete);

	if (!wait_for_completion_timeout(&cqspi->transfer_complete,
					 msecs_to_jiffies(max_t(size_t, bytes_to_dma, 500)))) {
		ret = -ETIMEDOUT;
		goto failrd;
	}

	/* Disable DMA interrupt */
	writel(0x0, cqspi->iobase + CQSPI_REG_VERSAL_DMA_DST_I_DIS);

	/* Clear indirect completion status */
	writel(CQSPI_REG_INDIRECTRD_DONE_MASK,
	       cqspi->iobase + CQSPI_REG_INDIRECTRD);
	dma_unmap_single(dev, dma_addr, bytes_to_dma, DMA_FROM_DEVICE);

	cqspi_controller_enable(cqspi, 0);

	reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
	reg &= ~CQSPI_REG_CONFIG_DMA_MASK;
	writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);

	cqspi_controller_enable(cqspi, 1);

	ret = zynqmp_pm_ospi_mux_select(cqspi->pd_dev_id,
					PM_OSPI_MUX_SEL_LINEAR);
	if (ret)
		return ret;

nondmard:
	if (bytes_rem) {
		addr += bytes_to_dma;
		buf += bytes_to_dma;
		ret = cqspi_indirect_read_execute(f_pdata, buf, addr,
						  bytes_rem);
		if (ret)
			return ret;
	}

	return 0;

failrd:
	/* Disable DMA interrupt */
	writel(0x0, reg_base + CQSPI_REG_VERSAL_DMA_DST_I_DIS);

	/* Cancel the indirect read */
	writel(CQSPI_REG_INDIRECTWR_CANCEL_MASK,
	       reg_base + CQSPI_REG_INDIRECTRD);

	dma_unmap_single(dev, dma_addr, bytes_to_dma, DMA_FROM_DEVICE);

	reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
	reg &= ~CQSPI_REG_CONFIG_DMA_MASK;
	writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);

	zynqmp_pm_ospi_mux_select(cqspi->pd_dev_id, PM_OSPI_MUX_SEL_LINEAR);

	return ret;
}

static int cqspi_write_setup(struct cqspi_flash_pdata *f_pdata,
			     const struct spi_mem_op *op)
{
	unsigned int reg;
	int ret;
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	u8 opcode;

	ret = cqspi_enable_dtr(f_pdata, op, CQSPI_REG_OP_EXT_WRITE_LSB);
	if (ret)
		return ret;

	if (op->cmd.dtr)
		opcode = op->cmd.opcode >> 8;
	else
		opcode = op->cmd.opcode;

	/* Set opcode. */
	reg = opcode << CQSPI_REG_WR_INSTR_OPCODE_LSB;
	reg |= CQSPI_OP_WIDTH(op->data) << CQSPI_REG_WR_INSTR_TYPE_DATA_LSB;
	reg |= CQSPI_OP_WIDTH(op->addr) << CQSPI_REG_WR_INSTR_TYPE_ADDR_LSB;
	writel(reg, reg_base + CQSPI_REG_WR_INSTR);
	reg = cqspi_calc_rdreg(op);
	writel(reg, reg_base + CQSPI_REG_RD_INSTR);

	/*
	 * SPI NAND flashes require the address of the status register to be
	 * passed in the Read SR command. Also, some SPI NOR flashes like the
	 * Cypress Semper flash expect a 4-byte dummy address in the Read SR
	 * command in DTR mode.
	 *
	 * But this controller does not support address phase in the Read SR
	 * command when doing auto-HW polling. So, disable write completion
	 * polling on the controller's side. spinand and spi-nor will take
	 * care of polling the status register.
	 */
	if (cqspi->wr_completion) {
		reg = readl(reg_base + CQSPI_REG_WR_COMPLETION_CTRL);
		reg |= CQSPI_REG_WR_DISABLE_AUTO_POLL;
		writel(reg, reg_base + CQSPI_REG_WR_COMPLETION_CTRL);
		/*
		 * DAC mode requires auto polling as the flash needs to be
		 * polled for write completion in case of a bubble in the SPI
		 * transaction due to a slow CPU/DMA master.
		 */
		cqspi->use_direct_mode_wr = false;
	}

	reg = readl(reg_base + CQSPI_REG_SIZE);
	reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
	reg |= (op->addr.nbytes - 1);
	writel(reg, reg_base + CQSPI_REG_SIZE);
	return 0;
}

static int cqspi_indirect_write_execute(struct cqspi_flash_pdata *f_pdata,
					loff_t to_addr, const u8 *txbuf,
					const size_t n_tx)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	struct device *dev = &cqspi->pdev->dev;
	void __iomem *reg_base = cqspi->iobase;
	unsigned int remaining = n_tx;
	unsigned int write_bytes;
	int ret;

	writel(to_addr, reg_base + CQSPI_REG_INDIRECTWRSTARTADDR);
	writel(remaining, reg_base + CQSPI_REG_INDIRECTWRBYTES);

	/* Clear all interrupts. */
	writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);

	writel(CQSPI_IRQ_MASK_WR, reg_base + CQSPI_REG_IRQMASK);

	reinit_completion(&cqspi->transfer_complete);
	writel(CQSPI_REG_INDIRECTWR_START_MASK,
	       reg_base + CQSPI_REG_INDIRECTWR);
	/*
	 * As per 66AK2G02 TRM SPRUHY8F section 11.15.5.3 Indirect Access
	 * Controller programming sequence, a couple of cycles of
	 * QSPI_REF_CLK delay is required for the above bit to
	 * be internally synchronized by the QSPI module. Provide 5
	 * cycles of delay.
	 */
	if (cqspi->wr_delay)
		ndelay(cqspi->wr_delay);

	/*
	 * If a hazard exists between the APB and AHB interfaces, perform a
	 * dummy readback from the controller to ensure synchronization.
	 */
	if (cqspi->apb_ahb_hazard)
		readl(reg_base + CQSPI_REG_INDIRECTWR);

	while (remaining > 0) {
		size_t write_words, mod_bytes;

		write_bytes = remaining;
		write_words = write_bytes / 4;
		mod_bytes = write_bytes % 4;
		/* Write 4 bytes at a time then single bytes. */
		if (write_words) {
			iowrite32_rep(cqspi->ahb_base, txbuf, write_words);
			txbuf += (write_words * 4);
		}
		if (mod_bytes) {
			unsigned int temp = 0xFFFFFFFF;

			memcpy(&temp, txbuf, mod_bytes);
			iowrite32(temp, cqspi->ahb_base);
			txbuf += mod_bytes;
		}

		if (!wait_for_completion_timeout(&cqspi->transfer_complete,
						 msecs_to_jiffies(CQSPI_TIMEOUT_MS))) {
			dev_err(dev, "Indirect write timeout\n");
			ret = -ETIMEDOUT;
			goto failwr;
		}

		remaining -= write_bytes;

		if (remaining > 0)
			reinit_completion(&cqspi->transfer_complete);
	}

	/* Check indirect done status */
	ret = cqspi_wait_for_bit(cqspi->ddata, reg_base + CQSPI_REG_INDIRECTWR,
				 CQSPI_REG_INDIRECTWR_DONE_MASK, 0, false);
	if (ret) {
		dev_err(dev, "Indirect write completion error (%i)\n", ret);
		goto failwr;
	}

	/* Disable interrupt. */
	writel(0, reg_base + CQSPI_REG_IRQMASK);

	/* Clear indirect completion status */
	writel(CQSPI_REG_INDIRECTWR_DONE_MASK, reg_base + CQSPI_REG_INDIRECTWR);

	cqspi_wait_idle(cqspi);

	return 0;

failwr:
	/* Disable interrupt. */
	writel(0, reg_base + CQSPI_REG_IRQMASK);

	/* Cancel the indirect write */
	writel(CQSPI_REG_INDIRECTWR_CANCEL_MASK,
	       reg_base + CQSPI_REG_INDIRECTWR);
	return ret;
}

static void cqspi_chipselect(struct cqspi_flash_pdata *f_pdata)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	unsigned int chip_select = f_pdata->cs;
	unsigned int reg;

	reg = readl(reg_base + CQSPI_REG_CONFIG);
	if (cqspi->is_decoded_cs) {
		reg |= CQSPI_REG_CONFIG_DECODE_MASK;
	} else {
		reg &= ~CQSPI_REG_CONFIG_DECODE_MASK;

		/* Convert CS if without decoder.
		 * CS0 to 4b'1110
		 * CS1 to 4b'1101
		 * CS2 to 4b'1011
		 * CS3 to 4b'0111
		 */
		chip_select = 0xF & ~(1 << chip_select);
	}

	reg &= ~(CQSPI_REG_CONFIG_CHIPSELECT_MASK
		 << CQSPI_REG_CONFIG_CHIPSELECT_LSB);
	reg |= (chip_select & CQSPI_REG_CONFIG_CHIPSELECT_MASK)
		<< CQSPI_REG_CONFIG_CHIPSELECT_LSB;
	writel(reg, reg_base + CQSPI_REG_CONFIG);
}

static unsigned int calculate_ticks_for_ns(const unsigned int ref_clk_hz,
					   const unsigned int ns_val)
{
	unsigned int ticks;

	ticks = ref_clk_hz / 1000;	/* kHz */
	ticks = DIV_ROUND_UP(ticks * ns_val, 1000000);

	return ticks;
}

static void cqspi_delay(struct cqspi_flash_pdata *f_pdata)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *iobase = cqspi->iobase;
	const unsigned int ref_clk_hz = cqspi->master_ref_clk_hz;
	unsigned int tshsl, tchsh, tslch, tsd2d;
	unsigned int reg;
	unsigned int tsclk;

	/* calculate the number of ref ticks for one sclk tick */
	tsclk = DIV_ROUND_UP(ref_clk_hz, cqspi->sclk);

	tshsl = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tshsl_ns);
	/* this particular value must be at least one sclk */
	if (tshsl < tsclk)
		tshsl = tsclk;

	tchsh = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tchsh_ns);
	tslch = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tslch_ns);
	tsd2d = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tsd2d_ns);

	reg = (tshsl & CQSPI_REG_DELAY_TSHSL_MASK)
	       << CQSPI_REG_DELAY_TSHSL_LSB;
	reg |= (tchsh & CQSPI_REG_DELAY_TCHSH_MASK)
		<< CQSPI_REG_DELAY_TCHSH_LSB;
	reg |= (tslch & CQSPI_REG_DELAY_TSLCH_MASK)
		<< CQSPI_REG_DELAY_TSLCH_LSB;
	reg |= (tsd2d & CQSPI_REG_DELAY_TSD2D_MASK)
		<< CQSPI_REG_DELAY_TSD2D_LSB;
	writel(reg, iobase + CQSPI_REG_DELAY);
}

static void cqspi_config_baudrate_div(struct cqspi_st *cqspi)
{
	const unsigned int ref_clk_hz = cqspi->master_ref_clk_hz;
	void __iomem *reg_base = cqspi->iobase;
	u32 reg, div;

	/* Recalculate the baudrate divisor based on QSPI specification. */
	div = DIV_ROUND_UP(ref_clk_hz, 2 * cqspi->sclk) - 1;
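
	/*
	 * For example, a 500 MHz reference clock and a 25 MHz requested SCLK
	 * give div = DIV_ROUND_UP(500 MHz, 2 * 25 MHz) - 1 = 9, i.e. an
	 * output clock of 500 MHz / ((9 + 1) * 2) = 25 MHz.
	 */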

	/* Maximum baud divisor */
	if (div > CQSPI_REG_CONFIG_BAUD_MASK) {
		div = CQSPI_REG_CONFIG_BAUD_MASK;
		dev_warn(&cqspi->pdev->dev,
			 "Unable to adjust clock <= %d hz. Reduced to %d hz\n",
			 cqspi->sclk, ref_clk_hz/((div+1)*2));
	}

	reg = readl(reg_base + CQSPI_REG_CONFIG);
	reg &= ~(CQSPI_REG_CONFIG_BAUD_MASK << CQSPI_REG_CONFIG_BAUD_LSB);
	reg |= (div & CQSPI_REG_CONFIG_BAUD_MASK) << CQSPI_REG_CONFIG_BAUD_LSB;
	writel(reg, reg_base + CQSPI_REG_CONFIG);
}

static void cqspi_readdata_capture(struct cqspi_st *cqspi,
				   const bool bypass,
				   const unsigned int delay)
{
	void __iomem *reg_base = cqspi->iobase;
	unsigned int reg;

	reg = readl(reg_base + CQSPI_REG_READCAPTURE);

	if (bypass)
		reg |= (1 << CQSPI_REG_READCAPTURE_BYPASS_LSB);
	else
		reg &= ~(1 << CQSPI_REG_READCAPTURE_BYPASS_LSB);

	reg &= ~(CQSPI_REG_READCAPTURE_DELAY_MASK
		 << CQSPI_REG_READCAPTURE_DELAY_LSB);

	reg |= (delay & CQSPI_REG_READCAPTURE_DELAY_MASK)
		<< CQSPI_REG_READCAPTURE_DELAY_LSB;

	writel(reg, reg_base + CQSPI_REG_READCAPTURE);
}

static void cqspi_configure(struct cqspi_flash_pdata *f_pdata,
			    unsigned long sclk)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	int switch_cs = (cqspi->current_cs != f_pdata->cs);
	int switch_ck = (cqspi->sclk != sclk);

	if (switch_cs || switch_ck)
		cqspi_controller_enable(cqspi, 0);

	/* Switch chip select. */
	if (switch_cs) {
		cqspi->current_cs = f_pdata->cs;
		cqspi_chipselect(f_pdata);
	}

	/* Setup baudrate divisor and delays */
	if (switch_ck) {
		cqspi->sclk = sclk;
		cqspi_config_baudrate_div(cqspi);
		cqspi_delay(f_pdata);
		cqspi_readdata_capture(cqspi, !cqspi->rclk_en,
				       f_pdata->read_delay);
	}

	if (switch_cs || switch_ck)
		cqspi_controller_enable(cqspi, 1);
}

static ssize_t cqspi_write(struct cqspi_flash_pdata *f_pdata,
			   const struct spi_mem_op *op)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	loff_t to = op->addr.val;
	size_t len = op->data.nbytes;
	const u_char *buf = op->data.buf.out;
	int ret;

	ret = cqspi_write_setup(f_pdata, op);
	if (ret)
		return ret;

	/*
	 * Some flashes like the Cypress Semper flash expect a dummy 4-byte
	 * address (all 0s) with the read status register command in DTR mode.
	 * But this controller does not support sending dummy address bytes to
	 * the flash when it is polling the write completion register in DTR
	 * mode. So, we can not use direct mode when in DTR mode for writing
	 * data.
	 */
	if (!op->cmd.dtr && cqspi->use_direct_mode &&
	    cqspi->use_direct_mode_wr && ((to + len) <= cqspi->ahb_size)) {
		memcpy_toio(cqspi->ahb_base + to, buf, len);
		return cqspi_wait_idle(cqspi);
	}

	return cqspi_indirect_write_execute(f_pdata, to, buf, len);
}

static void cqspi_rx_dma_callback(void *param)
{
	struct cqspi_st *cqspi = param;

	complete(&cqspi->rx_dma_complete);
}

static int cqspi_direct_read_execute(struct cqspi_flash_pdata *f_pdata,
				     u_char *buf, loff_t from, size_t len)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	struct device *dev = &cqspi->pdev->dev;
	enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
	dma_addr_t dma_src = (dma_addr_t)cqspi->mmap_phys_base + from;
	int ret = 0;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;
	dma_addr_t dma_dst;
	struct device *ddev;

	if (!cqspi->rx_chan || !virt_addr_valid(buf)) {
		memcpy_fromio(buf, cqspi->ahb_base + from, len);
		return 0;
	}

	ddev = cqspi->rx_chan->device->dev;
	dma_dst = dma_map_single(ddev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(ddev, dma_dst)) {
		dev_err(dev, "dma mapping failed\n");
		return -ENOMEM;
	}
	tx = dmaengine_prep_dma_memcpy(cqspi->rx_chan, dma_dst, dma_src,
				       len, flags);
	if (!tx) {
		dev_err(dev, "device_prep_dma_memcpy error\n");
		ret = -EIO;
		goto err_unmap;
	}

	tx->callback = cqspi_rx_dma_callback;
	tx->callback_param = cqspi;
	cookie = tx->tx_submit(tx);
	reinit_completion(&cqspi->rx_dma_complete);

	ret = dma_submit_error(cookie);
	if (ret) {
		dev_err(dev, "dma_submit_error %d\n", cookie);
		ret = -EIO;
		goto err_unmap;
	}

	dma_async_issue_pending(cqspi->rx_chan);
	if (!wait_for_completion_timeout(&cqspi->rx_dma_complete,
					 msecs_to_jiffies(max_t(size_t, len, 500)))) {
		dmaengine_terminate_sync(cqspi->rx_chan);
		dev_err(dev, "DMA wait_for_completion_timeout\n");
		ret = -ETIMEDOUT;
		goto err_unmap;
	}

err_unmap:
	dma_unmap_single(ddev, dma_dst, len, DMA_FROM_DEVICE);

	return ret;
}

static ssize_t cqspi_read(struct cqspi_flash_pdata *f_pdata,
			  const struct spi_mem_op *op)
{
	struct cqspi_st *cqspi = f_pdata->cqspi;
	const struct cqspi_driver_platdata *ddata = cqspi->ddata;
	loff_t from = op->addr.val;
	size_t len = op->data.nbytes;
	u_char *buf = op->data.buf.in;
	u64 dma_align = (u64)(uintptr_t)buf;
	int ret;

	ret = cqspi_read_setup(f_pdata, op);
	if (ret)
		return ret;

	if (cqspi->use_direct_mode && ((from + len) <= cqspi->ahb_size))
		return cqspi_direct_read_execute(f_pdata, buf, from, len);

	if (cqspi->use_dma_read && ddata && ddata->indirect_read_dma &&
	    virt_addr_valid(buf) && ((dma_align & CQSPI_DMA_UNALIGN) == 0))
		return ddata->indirect_read_dma(f_pdata, buf, from, len);

	return cqspi_indirect_read_execute(f_pdata, buf, from, len);
}

static int cqspi_mem_process(struct spi_mem *mem, const struct spi_mem_op *op)
{
	struct cqspi_st *cqspi = spi_controller_get_devdata(mem->spi->controller);
	struct cqspi_flash_pdata *f_pdata;

	f_pdata = &cqspi->f_pdata[spi_get_chipselect(mem->spi, 0)];
	cqspi_configure(f_pdata, op->max_freq);

	if (op->data.dir == SPI_MEM_DATA_IN && op->data.buf.in) {
		/*
		 * Performing reads in DAC mode forces reading a minimum of
		 * 4 bytes, which is unsupported on some flash devices during
		 * register reads; prefer STIG mode for such small reads.
		 */
		if (!op->addr.nbytes ||
		    (op->data.nbytes <= CQSPI_STIG_DATA_LEN_MAX &&
		     !cqspi->disable_stig_mode))
			return cqspi_command_read(f_pdata, op);

		return cqspi_read(f_pdata, op);
	}

	if (!op->addr.nbytes || !op->data.buf.out)
		return cqspi_command_write(f_pdata, op);

	return cqspi_write(f_pdata, op);
}

static int cqspi_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	int ret;
	struct cqspi_st *cqspi = spi_controller_get_devdata(mem->spi->controller);
	struct device *dev = &cqspi->pdev->dev;

	ret = pm_runtime_resume_and_get(dev);
	if (ret) {
		dev_err(&mem->spi->dev, "resume failed with %d\n", ret);
		return ret;
	}

	ret = cqspi_mem_process(mem, op);

	pm_runtime_put_autosuspend(dev);

	if (ret)
		dev_err(&mem->spi->dev, "operation failed with %d\n", ret);

	return ret;
}

static bool cqspi_supports_mem_op(struct spi_mem *mem,
				  const struct spi_mem_op *op)
{
	bool all_true, all_false;

	/*
	 * op->dummy.dtr is required for converting nbytes into ncycles.
	 * Also, don't check the dtr field of the op phase having zero nbytes.
	 */
	all_true = op->cmd.dtr &&
		   (!op->addr.nbytes || op->addr.dtr) &&
		   (!op->dummy.nbytes || op->dummy.dtr) &&
		   (!op->data.nbytes || op->data.dtr);

	all_false = !op->cmd.dtr && !op->addr.dtr && !op->dummy.dtr &&
		    !op->data.dtr;

	if (all_true) {
		/* Right now we only support 8-8-8 DTR mode. */
		if (op->cmd.nbytes && op->cmd.buswidth != 8)
			return false;
		if (op->addr.nbytes && op->addr.buswidth != 8)
			return false;
		if (op->data.nbytes && op->data.buswidth != 8)
			return false;
	} else if (!all_false) {
		/* Mixed DTR modes are not supported. */
		return false;
	}

	return spi_mem_default_supports_op(mem, op);
}

static int cqspi_of_get_flash_pdata(struct platform_device *pdev,
				    struct cqspi_flash_pdata *f_pdata,
				    struct device_node *np)
{
	if (of_property_read_u32(np, "cdns,read-delay", &f_pdata->read_delay)) {
		dev_err(&pdev->dev, "couldn't determine read-delay\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "cdns,tshsl-ns", &f_pdata->tshsl_ns)) {
		dev_err(&pdev->dev, "couldn't determine tshsl-ns\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "cdns,tsd2d-ns", &f_pdata->tsd2d_ns)) {
		dev_err(&pdev->dev, "couldn't determine tsd2d-ns\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "cdns,tchsh-ns", &f_pdata->tchsh_ns)) {
		dev_err(&pdev->dev, "couldn't determine tchsh-ns\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "cdns,tslch-ns", &f_pdata->tslch_ns)) {
		dev_err(&pdev->dev, "couldn't determine tslch-ns\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "spi-max-frequency", &f_pdata->clk_rate)) {
		dev_err(&pdev->dev, "couldn't determine spi-max-frequency\n");
		return -ENXIO;
	}

	return 0;
}

static int cqspi_of_get_pdata(struct cqspi_st *cqspi)
{
	struct device *dev = &cqspi->pdev->dev;
	struct device_node *np = dev->of_node;
	u32 id[2];

	cqspi->is_decoded_cs = of_property_read_bool(np, "cdns,is-decoded-cs");

	if (of_property_read_u32(np, "cdns,fifo-depth", &cqspi->fifo_depth)) {
		/* Zero signals FIFO depth should be runtime detected. */
		cqspi->fifo_depth = 0;
	}

	if (of_property_read_u32(np, "cdns,fifo-width", &cqspi->fifo_width)) {
		dev_err(dev, "couldn't determine fifo-width\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "cdns,trigger-address",
				 &cqspi->trigger_address)) {
		dev_err(dev, "couldn't determine trigger-address\n");
		return -ENXIO;
	}

	if (of_property_read_u32(np, "num-cs", &cqspi->num_chipselect))
		cqspi->num_chipselect = CQSPI_MAX_CHIPSELECT;

	cqspi->rclk_en = of_property_read_bool(np, "cdns,rclk-en");

	if (!of_property_read_u32_array(np, "power-domains", id,
					ARRAY_SIZE(id)))
		cqspi->pd_dev_id = id[1];

	return 0;
}

static void cqspi_controller_init(struct cqspi_st *cqspi)
{
	u32 reg;

	/* Configure the remap address register, no remap */
	writel(0, cqspi->iobase + CQSPI_REG_REMAP);

	/* Disable all interrupts. */
	writel(0, cqspi->iobase + CQSPI_REG_IRQMASK);

	/* Configure the SRAM split to 1:1 . */
	writel(cqspi->fifo_depth / 2, cqspi->iobase + CQSPI_REG_SRAMPARTITION);

	/* Load indirect trigger address. */
	writel(cqspi->trigger_address,
	       cqspi->iobase + CQSPI_REG_INDIRECTTRIGGER);

	/* Program read watermark -- 1/2 of the FIFO. */
	writel(cqspi->fifo_depth * cqspi->fifo_width / 2,
	       cqspi->iobase + CQSPI_REG_INDIRECTRDWATERMARK);
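	/*
	 * For example, a 128-entry, 4-byte wide FIFO yields a 256-byte read
	 * watermark here and a 64-byte write watermark below.
	 */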
	/* Program write watermark -- 1/8 of the FIFO. */
	writel(cqspi->fifo_depth * cqspi->fifo_width / 8,
	       cqspi->iobase + CQSPI_REG_INDIRECTWRWATERMARK);

	/* Disable direct access controller */
	if (!cqspi->use_direct_mode) {
		reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
		reg &= ~CQSPI_REG_CONFIG_ENB_DIR_ACC_CTRL;
		writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);
	}

	/* Enable DMA interface */
	if (cqspi->use_dma_read) {
		reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
		reg |= CQSPI_REG_CONFIG_DMA_MASK;
		writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);
	}
}

static void cqspi_controller_detect_fifo_depth(struct cqspi_st *cqspi)
{
	struct device *dev = &cqspi->pdev->dev;
	u32 reg, fifo_depth;

	/*
	 * Bits N-1:0 are writable while bits 31:N are read as zero, with 2^N
	 * the FIFO depth.
	 */
	writel(U32_MAX, cqspi->iobase + CQSPI_REG_SRAMPARTITION);
	reg = readl(cqspi->iobase + CQSPI_REG_SRAMPARTITION);
	fifo_depth = reg + 1;

	/* FIFO depth of zero means no value from devicetree was provided. */
	if (cqspi->fifo_depth == 0) {
		cqspi->fifo_depth = fifo_depth;
		dev_dbg(dev, "using FIFO depth of %u\n", fifo_depth);
	} else if (fifo_depth != cqspi->fifo_depth) {
		dev_warn(dev, "detected FIFO depth (%u) different from config (%u)\n",
			 fifo_depth, cqspi->fifo_depth);
	}
}

static int cqspi_request_mmap_dma(struct cqspi_st *cqspi)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	cqspi->rx_chan = dma_request_chan_by_mask(&mask);
	if (IS_ERR(cqspi->rx_chan)) {
		int ret = PTR_ERR(cqspi->rx_chan);

		cqspi->rx_chan = NULL;
		if (ret == -ENODEV) {
			/* DMA support is not mandatory */
			dev_info(&cqspi->pdev->dev, "No Rx DMA available\n");
			return 0;
		}

		return dev_err_probe(&cqspi->pdev->dev, ret, "No Rx DMA available\n");
	}
	init_completion(&cqspi->rx_dma_complete);

	return 0;
}

static const char *cqspi_get_name(struct spi_mem *mem)
{
	struct cqspi_st *cqspi = spi_controller_get_devdata(mem->spi->controller);
	struct device *dev = &cqspi->pdev->dev;

	return devm_kasprintf(dev, GFP_KERNEL, "%s.%d", dev_name(dev),
			      spi_get_chipselect(mem->spi, 0));
}

static const struct spi_controller_mem_ops cqspi_mem_ops = {
	.exec_op = cqspi_exec_mem_op,
	.get_name = cqspi_get_name,
	.supports_op = cqspi_supports_mem_op,
};

static const struct spi_controller_mem_caps cqspi_mem_caps = {
	.dtr = true,
	.per_op_freq = true,
};

static int cqspi_setup_flash(struct cqspi_st *cqspi)
{
	unsigned int max_cs = cqspi->num_chipselect - 1;
	struct platform_device *pdev = cqspi->pdev;
	struct device *dev = &pdev->dev;
	struct cqspi_flash_pdata *f_pdata;
	unsigned int cs;
	int ret;

	/* Get flash device data */
	for_each_available_child_of_node_scoped(dev->of_node, np) {
		ret = of_property_read_u32(np, "reg", &cs);
		if (ret) {
			dev_err(dev, "Couldn't determine chip select.\n");
			return ret;
		}

		if (cs >= cqspi->num_chipselect) {
			dev_err(dev, "Chip select %d out of range.\n", cs);
			return -EINVAL;
		} else if (cs < max_cs) {
			max_cs = cs;
		}

		f_pdata = &cqspi->f_pdata[cs];
		f_pdata->cqspi = cqspi;
		f_pdata->cs = cs;

		ret = cqspi_of_get_flash_pdata(pdev, f_pdata, np);
		if (ret)
			return ret;
	}

	cqspi->num_chipselect = max_cs + 1;
	return 0;
}

static int cqspi_jh7110_clk_init(struct platform_device *pdev, struct cqspi_st *cqspi)
{
	static struct clk_bulk_data qspiclk[] = {
		{ .id = "apb" },
		{ .id = "ahb" },
	};

	int ret = 0;

	ret = devm_clk_bulk_get(&pdev->dev, ARRAY_SIZE(qspiclk), qspiclk);
	if (ret) {
		dev_err(&pdev->dev, "%s: failed to get qspi clocks\n", __func__);
		return ret;
	}

	cqspi->clks[CLK_QSPI_APB] = qspiclk[0].clk;
	cqspi->clks[CLK_QSPI_AHB] = qspiclk[1].clk;

	ret = clk_prepare_enable(cqspi->clks[CLK_QSPI_APB]);
	if (ret) {
		dev_err(&pdev->dev, "%s: failed to enable CLK_QSPI_APB\n", __func__);
		return ret;
	}

	ret = clk_prepare_enable(cqspi->clks[CLK_QSPI_AHB]);
	if (ret) {
		dev_err(&pdev->dev, "%s: failed to enable CLK_QSPI_AHB\n", __func__);
		goto disable_apb_clk;
	}

	cqspi->is_jh7110 = true;

	return 0;

disable_apb_clk:
	clk_disable_unprepare(cqspi->clks[CLK_QSPI_APB]);

	return ret;
}

static void cqspi_jh7110_disable_clk(struct platform_device *pdev, struct cqspi_st *cqspi)
{
	clk_disable_unprepare(cqspi->clks[CLK_QSPI_AHB]);
	clk_disable_unprepare(cqspi->clks[CLK_QSPI_APB]);
}

static int cqspi_probe(struct platform_device *pdev)
{
	const struct cqspi_driver_platdata *ddata;
	struct reset_control *rstc, *rstc_ocp, *rstc_ref;
	struct device *dev = &pdev->dev;
	struct spi_controller *host;
	struct resource *res_ahb;
	struct cqspi_st *cqspi;
	int ret;
	int irq;

	host = devm_spi_alloc_host(&pdev->dev, sizeof(*cqspi));
	if (!host)
		return -ENOMEM;

	host->mode_bits = SPI_RX_QUAD | SPI_RX_DUAL;
	host->mem_ops = &cqspi_mem_ops;
	host->mem_caps = &cqspi_mem_caps;
	host->dev.of_node = pdev->dev.of_node;

	cqspi = spi_controller_get_devdata(host);

	cqspi->pdev = pdev;
	cqspi->host = host;
	cqspi->is_jh7110 = false;
	cqspi->ddata = ddata = of_device_get_match_data(dev);
	platform_set_drvdata(pdev, cqspi);

	/* Obtain configuration from OF. */
	ret = cqspi_of_get_pdata(cqspi);
	if (ret) {
		dev_err(dev, "Cannot get mandatory OF data.\n");
		return -ENODEV;
	}

	/* Obtain QSPI clock. */
	cqspi->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(cqspi->clk)) {
		dev_err(dev, "Cannot claim QSPI clock.\n");
		ret = PTR_ERR(cqspi->clk);
		return ret;
	}

	/* Obtain and remap controller address. */
	cqspi->iobase = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(cqspi->iobase)) {
		dev_err(dev, "Cannot remap controller address.\n");
		ret = PTR_ERR(cqspi->iobase);
		return ret;
	}

	/* Obtain and remap AHB address. */
	cqspi->ahb_base = devm_platform_get_and_ioremap_resource(pdev, 1, &res_ahb);
	if (IS_ERR(cqspi->ahb_base)) {
		dev_err(dev, "Cannot remap AHB address.\n");
		ret = PTR_ERR(cqspi->ahb_base);
		return ret;
	}
	cqspi->mmap_phys_base = (dma_addr_t)res_ahb->start;
	cqspi->ahb_size = resource_size(res_ahb);

	init_completion(&cqspi->transfer_complete);

	/* Obtain IRQ line. */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return -ENXIO;

	ret = pm_runtime_set_active(dev);
	if (ret)
		return ret;

	ret = clk_prepare_enable(cqspi->clk);
	if (ret) {
		dev_err(dev, "Cannot enable QSPI clock.\n");
		goto probe_clk_failed;
	}

	/* Obtain QSPI reset control */
	rstc = devm_reset_control_get_optional_exclusive(dev, "qspi");
	if (IS_ERR(rstc)) {
		ret = PTR_ERR(rstc);
		dev_err(dev, "Cannot get QSPI reset.\n");
		goto probe_reset_failed;
	}

	rstc_ocp = devm_reset_control_get_optional_exclusive(dev, "qspi-ocp");
	if (IS_ERR(rstc_ocp)) {
		ret = PTR_ERR(rstc_ocp);
		dev_err(dev, "Cannot get QSPI OCP reset.\n");
		goto probe_reset_failed;
	}

	if (of_device_is_compatible(pdev->dev.of_node, "starfive,jh7110-qspi")) {
		rstc_ref = devm_reset_control_get_optional_exclusive(dev, "rstc_ref");
		if (IS_ERR(rstc_ref)) {
			ret = PTR_ERR(rstc_ref);
			dev_err(dev, "Cannot get QSPI REF reset.\n");
			goto probe_reset_failed;
		}
		reset_control_assert(rstc_ref);
		reset_control_deassert(rstc_ref);
	}

	reset_control_assert(rstc);
	reset_control_deassert(rstc);

	reset_control_assert(rstc_ocp);
	reset_control_deassert(rstc_ocp);

	cqspi->master_ref_clk_hz = clk_get_rate(cqspi->clk);
	host->max_speed_hz = cqspi->master_ref_clk_hz;

	/* write completion is supported by default */
	cqspi->wr_completion = true;

	if (ddata) {
		if (ddata->quirks & CQSPI_NEEDS_WR_DELAY)
			cqspi->wr_delay = 50 * DIV_ROUND_UP(NSEC_PER_SEC,
							    cqspi->master_ref_clk_hz);
		if (ddata->hwcaps_mask & CQSPI_SUPPORTS_OCTAL)
			host->mode_bits |= SPI_RX_OCTAL | SPI_TX_OCTAL;
		if (ddata->hwcaps_mask & CQSPI_SUPPORTS_QUAD)
			host->mode_bits |= SPI_TX_QUAD;
		if (!(ddata->quirks & CQSPI_DISABLE_DAC_MODE)) {
			cqspi->use_direct_mode = true;
			cqspi->use_direct_mode_wr = true;
		}
		if (ddata->quirks & CQSPI_SUPPORT_EXTERNAL_DMA)
			cqspi->use_dma_read = true;
		if (ddata->quirks & CQSPI_NO_SUPPORT_WR_COMPLETION)
			cqspi->wr_completion = false;
		if (ddata->quirks & CQSPI_SLOW_SRAM)
			cqspi->slow_sram = true;
		if (ddata->quirks & CQSPI_NEEDS_APB_AHB_HAZARD_WAR)
			cqspi->apb_ahb_hazard = true;

		if (ddata->jh7110_clk_init) {
			ret = cqspi_jh7110_clk_init(pdev, cqspi);
			if (ret)
				goto probe_reset_failed;
		}
		if (ddata->quirks & CQSPI_DISABLE_STIG_MODE)
			cqspi->disable_stig_mode = true;

		if (ddata->quirks & CQSPI_DMA_SET_MASK) {
			ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
			if (ret)
				goto probe_reset_failed;
		}
	}

	ret = devm_request_irq(dev, irq, cqspi_irq_handler, 0,
			       pdev->name, cqspi);
	if (ret) {
		dev_err(dev, "Cannot request IRQ.\n");
		goto probe_reset_failed;
	}

	cqspi_wait_idle(cqspi);
	cqspi_controller_enable(cqspi, 0);
	cqspi_controller_detect_fifo_depth(cqspi);
	cqspi_controller_init(cqspi);
	cqspi_controller_enable(cqspi, 1);
	cqspi->current_cs = -1;
	cqspi->sclk = 0;

	ret = cqspi_setup_flash(cqspi);
	if (ret) {
		dev_err(dev, "failed to setup flash parameters %d\n", ret);
		goto probe_setup_failed;
	}

	host->num_chipselect = cqspi->num_chipselect;

	if (ddata && (ddata->quirks & CQSPI_SUPPORT_DEVICE_RESET))
		cqspi_device_reset(cqspi);

	if (cqspi->use_direct_mode) {
	if (cqspi->use_direct_mode) {
		ret = cqspi_request_mmap_dma(cqspi);
		if (ret == -EPROBE_DEFER)
			goto probe_setup_failed;
	}

	pm_runtime_enable(dev);

	pm_runtime_set_autosuspend_delay(dev, CQSPI_AUTOSUSPEND_TIMEOUT);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_get_noresume(dev);

	ret = spi_register_controller(host);
	if (ret) {
		dev_err(&pdev->dev, "failed to register SPI ctlr %d\n", ret);
		goto probe_setup_failed;
	}

	pm_runtime_put_autosuspend(dev);

	return 0;
probe_setup_failed:
	cqspi_controller_enable(cqspi, 0);
	pm_runtime_disable(dev);
probe_reset_failed:
	if (cqspi->is_jh7110)
		cqspi_jh7110_disable_clk(pdev, cqspi);
	clk_disable_unprepare(cqspi->clk);
probe_clk_failed:
	return ret;
}

static void cqspi_remove(struct platform_device *pdev)
{
	struct cqspi_st *cqspi = platform_get_drvdata(pdev);

	spi_unregister_controller(cqspi->host);
	cqspi_controller_enable(cqspi, 0);

	if (cqspi->rx_chan)
		dma_release_channel(cqspi->rx_chan);

	if (pm_runtime_get_sync(&pdev->dev) >= 0)
		clk_disable(cqspi->clk);

	if (cqspi->is_jh7110)
		cqspi_jh7110_disable_clk(pdev, cqspi);

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
}

static int cqspi_runtime_suspend(struct device *dev)
{
	struct cqspi_st *cqspi = dev_get_drvdata(dev);

	cqspi_controller_enable(cqspi, 0);
	clk_disable_unprepare(cqspi->clk);
	return 0;
}

static int cqspi_runtime_resume(struct device *dev)
{
	struct cqspi_st *cqspi = dev_get_drvdata(dev);

	clk_prepare_enable(cqspi->clk);
	cqspi_wait_idle(cqspi);
	cqspi_controller_enable(cqspi, 0);
	cqspi_controller_init(cqspi);
	cqspi_controller_enable(cqspi, 1);

	cqspi->current_cs = -1;
	cqspi->sclk = 0;
	return 0;
}

static int cqspi_suspend(struct device *dev)
{
	struct cqspi_st *cqspi = dev_get_drvdata(dev);
	int ret;

	ret = spi_controller_suspend(cqspi->host);
	if (ret)
		return ret;

	return pm_runtime_force_suspend(dev);
}

static int cqspi_resume(struct device *dev)
{
	struct cqspi_st *cqspi = dev_get_drvdata(dev);
	int ret;

	ret = pm_runtime_force_resume(dev);
	if (ret) {
		dev_err(dev, "pm_runtime_force_resume failed on resume\n");
		return ret;
	}

	return spi_controller_resume(cqspi->host);
}

static const struct dev_pm_ops cqspi_dev_pm_ops = {
	RUNTIME_PM_OPS(cqspi_runtime_suspend, cqspi_runtime_resume, NULL)
	SYSTEM_SLEEP_PM_OPS(cqspi_suspend, cqspi_resume)
};

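/*
 * Per-compatible controller data: hardware capability and quirk flags,
 * plus optional hooks for SoCs that need external DMA reads or extra
 * clock handling.
 */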
static const struct cqspi_driver_platdata cdns_qspi = {
	.quirks = CQSPI_DISABLE_DAC_MODE,
};

static const struct cqspi_driver_platdata k2g_qspi = {
	.quirks = CQSPI_NEEDS_WR_DELAY,
};

static const struct cqspi_driver_platdata am654_ospi = {
	.hwcaps_mask = CQSPI_SUPPORTS_OCTAL | CQSPI_SUPPORTS_QUAD,
	.quirks = CQSPI_NEEDS_WR_DELAY,
};

static const struct cqspi_driver_platdata intel_lgm_qspi = {
	.quirks = CQSPI_DISABLE_DAC_MODE,
};

static const struct cqspi_driver_platdata socfpga_qspi = {
	.quirks = CQSPI_DISABLE_DAC_MODE
			| CQSPI_NO_SUPPORT_WR_COMPLETION
			| CQSPI_SLOW_SRAM
			| CQSPI_DISABLE_STIG_MODE,
};

static const struct cqspi_driver_platdata versal_ospi = {
	.hwcaps_mask = CQSPI_SUPPORTS_OCTAL,
	.quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_SUPPORT_EXTERNAL_DMA
			| CQSPI_DMA_SET_MASK,
	.indirect_read_dma = cqspi_versal_indirect_read_dma,
	.get_dma_status = cqspi_get_versal_dma_status,
};

static const struct cqspi_driver_platdata versal2_ospi = {
	.hwcaps_mask = CQSPI_SUPPORTS_OCTAL,
	.quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_SUPPORT_EXTERNAL_DMA
			| CQSPI_DMA_SET_MASK
			| CQSPI_SUPPORT_DEVICE_RESET,
	.indirect_read_dma = cqspi_versal_indirect_read_dma,
	.get_dma_status = cqspi_get_versal_dma_status,
};

static const struct cqspi_driver_platdata jh7110_qspi = {
	.quirks = CQSPI_DISABLE_DAC_MODE,
	.jh7110_clk_init = cqspi_jh7110_clk_init,
};

static const struct cqspi_driver_platdata pensando_cdns_qspi = {
	.quirks = CQSPI_NEEDS_APB_AHB_HAZARD_WAR | CQSPI_DISABLE_DAC_MODE,
};

static const struct cqspi_driver_platdata mobileye_eyeq5_ospi = {
	.hwcaps_mask = CQSPI_SUPPORTS_OCTAL,
	.quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_NO_SUPPORT_WR_COMPLETION |
			CQSPI_RD_NO_IRQ,
};

static const struct of_device_id cqspi_dt_ids[] = {
	{
		.compatible = "cdns,qspi-nor",
		.data = &cdns_qspi,
	},
	{
		.compatible = "ti,k2g-qspi",
		.data = &k2g_qspi,
	},
	{
		.compatible = "ti,am654-ospi",
		.data = &am654_ospi,
	},
	{
		.compatible = "intel,lgm-qspi",
		.data = &intel_lgm_qspi,
	},
	{
		.compatible = "xlnx,versal-ospi-1.0",
		.data = &versal_ospi,
	},
	{
		.compatible = "intel,socfpga-qspi",
		.data = &socfpga_qspi,
	},
	{
		.compatible = "starfive,jh7110-qspi",
		.data = &jh7110_qspi,
	},
	{
		.compatible = "amd,pensando-elba-qspi",
		.data = &pensando_cdns_qspi,
	},
	{
		.compatible = "mobileye,eyeq5-ospi",
		.data = &mobileye_eyeq5_ospi,
	},
	{
		.compatible = "amd,versal2-ospi",
		.data = &versal2_ospi,
	},
	{ /* end of table */ }
};

MODULE_DEVICE_TABLE(of, cqspi_dt_ids);

static struct platform_driver cqspi_platform_driver = {
	.probe = cqspi_probe,
	.remove = cqspi_remove,
	.driver = {
		.name = CQSPI_NAME,
		.pm = pm_ptr(&cqspi_dev_pm_ops),
		.of_match_table = cqspi_dt_ids,
	},
};

module_platform_driver(cqspi_platform_driver);

MODULE_DESCRIPTION("Cadence QSPI Controller Driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" CQSPI_NAME);
MODULE_AUTHOR("Ley Foon Tan <lftan@altera.com>");
MODULE_AUTHOR("Graham Moore <grmoore@opensource.altera.com>");
MODULE_AUTHOR("Vadivel Murugan R <vadivel.muruganx.ramuthevar@intel.com>");
MODULE_AUTHOR("Vignesh Raghavendra <vigneshr@ti.com>");
MODULE_AUTHOR("Pratyush Yadav <p.yadav@ti.com>");