1 /* 2 * SPDX-License-Identifier: GPL-2.0 3 * 4 * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. 5 * 6 * Authors: 7 * Md Sadre Alam <quic_mdalam@quicinc.com> 8 * Sricharan R <quic_srichara@quicinc.com> 9 * Varadarajan Narayanan <quic_varada@quicinc.com> 10 */ 11 #include <linux/bitops.h> 12 #include <linux/clk.h> 13 #include <linux/delay.h> 14 #include <linux/dmaengine.h> 15 #include <linux/dma-mapping.h> 16 #include <linux/dma/qcom_adm.h> 17 #include <linux/dma/qcom_bam_dma.h> 18 #include <linux/module.h> 19 #include <linux/of.h> 20 #include <linux/platform_device.h> 21 #include <linux/slab.h> 22 #include <linux/mtd/nand-qpic-common.h> 23 #include <linux/mtd/spinand.h> 24 #include <linux/bitfield.h> 25 26 #define NAND_FLASH_SPI_CFG 0xc0 27 #define NAND_NUM_ADDR_CYCLES 0xc4 28 #define NAND_BUSY_CHECK_WAIT_CNT 0xc8 29 #define NAND_FLASH_FEATURES 0xf64 30 31 /* QSPI NAND config reg bits */ 32 #define LOAD_CLK_CNTR_INIT_EN BIT(28) 33 #define CLK_CNTR_INIT_VAL_VEC 0x924 34 #define CLK_CNTR_INIT_VAL_VEC_MASK GENMASK(27, 16) 35 #define FEA_STATUS_DEV_ADDR 0xc0 36 #define FEA_STATUS_DEV_ADDR_MASK GENMASK(15, 8) 37 #define SPI_CFG BIT(0) 38 #define SPI_NUM_ADDR 0xDA4DB 39 #define SPI_WAIT_CNT 0x10 40 #define QPIC_QSPI_NUM_CS 1 41 #define SPI_TRANSFER_MODE_x1 BIT(29) 42 #define SPI_TRANSFER_MODE_x4 (3 << 29) 43 #define SPI_WP BIT(28) 44 #define SPI_HOLD BIT(27) 45 #define QPIC_SET_FEATURE BIT(31) 46 47 #define SPINAND_RESET 0xff 48 #define SPINAND_READID 0x9f 49 #define SPINAND_GET_FEATURE 0x0f 50 #define SPINAND_SET_FEATURE 0x1f 51 #define SPINAND_READ 0x13 52 #define SPINAND_ERASE 0xd8 53 #define SPINAND_WRITE_EN 0x06 54 #define SPINAND_PROGRAM_EXECUTE 0x10 55 #define SPINAND_PROGRAM_LOAD 0x84 56 57 #define ACC_FEATURE 0xe 58 #define BAD_BLOCK_MARKER_SIZE 0x2 59 #define OOB_BUF_SIZE 128 60 #define ecceng_to_qspi(eng) container_of(eng, struct qpic_spi_nand, ecc_eng) 61 62 struct qpic_snand_op { 63 u32 cmd_reg; 64 u32 addr1_reg; 65 u32 addr2_reg; 
66 }; 67 68 struct snandc_read_status { 69 __le32 snandc_flash; 70 __le32 snandc_buffer; 71 __le32 snandc_erased_cw; 72 }; 73 74 /* 75 * ECC state struct 76 * @corrected: ECC corrected 77 * @bitflips: Max bit flip 78 * @failed: ECC failed 79 */ 80 struct qcom_ecc_stats { 81 u32 corrected; 82 u32 bitflips; 83 u32 failed; 84 }; 85 86 struct qpic_ecc { 87 struct device *dev; 88 int ecc_bytes_hw; 89 int spare_bytes; 90 int bbm_size; 91 int ecc_mode; 92 int bytes; 93 int steps; 94 int step_size; 95 int strength; 96 int cw_size; 97 int cw_data; 98 u32 cfg0; 99 u32 cfg1; 100 u32 cfg0_raw; 101 u32 cfg1_raw; 102 u32 ecc_buf_cfg; 103 u32 ecc_bch_cfg; 104 u32 clrflashstatus; 105 u32 clrreadstatus; 106 bool bch_enabled; 107 }; 108 109 struct qpic_spi_nand { 110 struct qcom_nand_controller *snandc; 111 struct spi_controller *ctlr; 112 struct mtd_info *mtd; 113 struct clk *iomacro_clk; 114 struct qpic_ecc *ecc; 115 struct qcom_ecc_stats ecc_stats; 116 struct nand_ecc_engine ecc_eng; 117 u8 *data_buf; 118 u8 *oob_buf; 119 u32 wlen; 120 __le32 addr1; 121 __le32 addr2; 122 __le32 cmd; 123 u32 num_cw; 124 bool oob_rw; 125 bool page_rw; 126 bool raw_rw; 127 }; 128 129 static void qcom_spi_set_read_loc_first(struct qcom_nand_controller *snandc, 130 int reg, int cw_offset, int read_size, 131 int is_last_read_loc) 132 { 133 __le32 locreg_val; 134 u32 val = (((cw_offset) << READ_LOCATION_OFFSET) | 135 ((read_size) << READ_LOCATION_SIZE) | ((is_last_read_loc) 136 << READ_LOCATION_LAST)); 137 138 locreg_val = cpu_to_le32(val); 139 140 if (reg == NAND_READ_LOCATION_0) 141 snandc->regs->read_location0 = locreg_val; 142 else if (reg == NAND_READ_LOCATION_1) 143 snandc->regs->read_location1 = locreg_val; 144 else if (reg == NAND_READ_LOCATION_2) 145 snandc->regs->read_location1 = locreg_val; 146 else if (reg == NAND_READ_LOCATION_3) 147 snandc->regs->read_location3 = locreg_val; 148 } 149 150 static void qcom_spi_set_read_loc_last(struct qcom_nand_controller *snandc, 151 int reg, int 
cw_offset, int read_size, 152 int is_last_read_loc) 153 { 154 __le32 locreg_val; 155 u32 val = (((cw_offset) << READ_LOCATION_OFFSET) | 156 ((read_size) << READ_LOCATION_SIZE) | ((is_last_read_loc) 157 << READ_LOCATION_LAST)); 158 159 locreg_val = cpu_to_le32(val); 160 161 if (reg == NAND_READ_LOCATION_LAST_CW_0) 162 snandc->regs->read_location_last0 = locreg_val; 163 else if (reg == NAND_READ_LOCATION_LAST_CW_1) 164 snandc->regs->read_location_last1 = locreg_val; 165 else if (reg == NAND_READ_LOCATION_LAST_CW_2) 166 snandc->regs->read_location_last2 = locreg_val; 167 else if (reg == NAND_READ_LOCATION_LAST_CW_3) 168 snandc->regs->read_location_last3 = locreg_val; 169 } 170 171 static struct qcom_nand_controller *nand_to_qcom_snand(struct nand_device *nand) 172 { 173 struct nand_ecc_engine *eng = nand->ecc.engine; 174 struct qpic_spi_nand *qspi = ecceng_to_qspi(eng); 175 176 return qspi->snandc; 177 } 178 179 static int qcom_spi_init(struct qcom_nand_controller *snandc) 180 { 181 u32 snand_cfg_val = 0x0; 182 int ret; 183 184 snand_cfg_val = FIELD_PREP(CLK_CNTR_INIT_VAL_VEC_MASK, CLK_CNTR_INIT_VAL_VEC) | 185 FIELD_PREP(LOAD_CLK_CNTR_INIT_EN, 0) | 186 FIELD_PREP(FEA_STATUS_DEV_ADDR_MASK, FEA_STATUS_DEV_ADDR) | 187 FIELD_PREP(SPI_CFG, 0); 188 189 snandc->regs->spi_cfg = cpu_to_le32(snand_cfg_val); 190 snandc->regs->num_addr_cycle = cpu_to_le32(SPI_NUM_ADDR); 191 snandc->regs->busy_wait_cnt = cpu_to_le32(SPI_WAIT_CNT); 192 193 qcom_write_reg_dma(snandc, &snandc->regs->spi_cfg, NAND_FLASH_SPI_CFG, 1, 0); 194 195 snand_cfg_val &= ~LOAD_CLK_CNTR_INIT_EN; 196 snandc->regs->spi_cfg = cpu_to_le32(snand_cfg_val); 197 198 qcom_write_reg_dma(snandc, &snandc->regs->spi_cfg, NAND_FLASH_SPI_CFG, 1, 0); 199 200 qcom_write_reg_dma(snandc, &snandc->regs->num_addr_cycle, NAND_NUM_ADDR_CYCLES, 1, 0); 201 qcom_write_reg_dma(snandc, &snandc->regs->busy_wait_cnt, NAND_BUSY_CHECK_WAIT_CNT, 1, 202 NAND_BAM_NEXT_SGL); 203 204 ret = qcom_submit_descs(snandc); 205 if (ret) { 206 
dev_err(snandc->dev, "failure in submitting spi init descriptor\n"); 207 return ret; 208 } 209 210 return ret; 211 } 212 213 static int qcom_spi_ooblayout_ecc(struct mtd_info *mtd, int section, 214 struct mtd_oob_region *oobregion) 215 { 216 struct nand_device *nand = mtd_to_nanddev(mtd); 217 struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand); 218 struct qpic_ecc *qecc = snandc->qspi->ecc; 219 220 if (section > 1) 221 return -ERANGE; 222 223 oobregion->length = qecc->ecc_bytes_hw + qecc->spare_bytes; 224 oobregion->offset = mtd->oobsize - oobregion->length; 225 226 return 0; 227 } 228 229 static int qcom_spi_ooblayout_free(struct mtd_info *mtd, int section, 230 struct mtd_oob_region *oobregion) 231 { 232 struct nand_device *nand = mtd_to_nanddev(mtd); 233 struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand); 234 struct qpic_ecc *qecc = snandc->qspi->ecc; 235 236 if (section) 237 return -ERANGE; 238 239 oobregion->length = qecc->steps * 4; 240 oobregion->offset = ((qecc->steps - 1) * qecc->bytes) + qecc->bbm_size; 241 242 return 0; 243 } 244 245 static const struct mtd_ooblayout_ops qcom_spi_ooblayout = { 246 .ecc = qcom_spi_ooblayout_ecc, 247 .free = qcom_spi_ooblayout_free, 248 }; 249 250 static int qcom_spi_ecc_init_ctx_pipelined(struct nand_device *nand) 251 { 252 struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand); 253 struct nand_ecc_props *conf = &nand->ecc.ctx.conf; 254 struct mtd_info *mtd = nanddev_to_mtd(nand); 255 int cwperpage, bad_block_byte; 256 struct qpic_ecc *ecc_cfg; 257 258 cwperpage = mtd->writesize / NANDC_STEP_SIZE; 259 snandc->qspi->num_cw = cwperpage; 260 261 ecc_cfg = kzalloc(sizeof(*ecc_cfg), GFP_KERNEL); 262 if (!ecc_cfg) 263 return -ENOMEM; 264 snandc->qspi->oob_buf = kzalloc(mtd->writesize + mtd->oobsize, 265 GFP_KERNEL); 266 if (!snandc->qspi->oob_buf) { 267 kfree(ecc_cfg); 268 return -ENOMEM; 269 } 270 271 memset(snandc->qspi->oob_buf, 0xff, mtd->writesize + mtd->oobsize); 272 273 nand->ecc.ctx.priv = 
ecc_cfg; 274 snandc->qspi->mtd = mtd; 275 276 ecc_cfg->ecc_bytes_hw = 7; 277 ecc_cfg->spare_bytes = 4; 278 ecc_cfg->bbm_size = 1; 279 ecc_cfg->bch_enabled = true; 280 ecc_cfg->bytes = ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes + ecc_cfg->bbm_size; 281 282 ecc_cfg->steps = 4; 283 ecc_cfg->strength = 4; 284 ecc_cfg->step_size = 512; 285 ecc_cfg->cw_data = 516; 286 ecc_cfg->cw_size = ecc_cfg->cw_data + ecc_cfg->bytes; 287 bad_block_byte = mtd->writesize - ecc_cfg->cw_size * (cwperpage - 1) + 1; 288 289 mtd_set_ooblayout(mtd, &qcom_spi_ooblayout); 290 291 ecc_cfg->cfg0 = FIELD_PREP(CW_PER_PAGE_MASK, (cwperpage - 1)) | 292 FIELD_PREP(UD_SIZE_BYTES_MASK, ecc_cfg->cw_data) | 293 FIELD_PREP(DISABLE_STATUS_AFTER_WRITE, 1) | 294 FIELD_PREP(NUM_ADDR_CYCLES_MASK, 3) | 295 FIELD_PREP(ECC_PARITY_SIZE_BYTES_RS, ecc_cfg->ecc_bytes_hw) | 296 FIELD_PREP(STATUS_BFR_READ, 0) | 297 FIELD_PREP(SET_RD_MODE_AFTER_STATUS, 1) | 298 FIELD_PREP(SPARE_SIZE_BYTES_MASK, ecc_cfg->spare_bytes); 299 300 ecc_cfg->cfg1 = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 0) | 301 FIELD_PREP(CS_ACTIVE_BSY, 0) | 302 FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, bad_block_byte) | 303 FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 0) | 304 FIELD_PREP(WR_RD_BSY_GAP_MASK, 20) | 305 FIELD_PREP(WIDE_FLASH, 0) | 306 FIELD_PREP(ENABLE_BCH_ECC, ecc_cfg->bch_enabled); 307 308 ecc_cfg->cfg0_raw = FIELD_PREP(CW_PER_PAGE_MASK, (cwperpage - 1)) | 309 FIELD_PREP(NUM_ADDR_CYCLES_MASK, 3) | 310 FIELD_PREP(UD_SIZE_BYTES_MASK, ecc_cfg->cw_size) | 311 FIELD_PREP(SPARE_SIZE_BYTES_MASK, 0); 312 313 ecc_cfg->cfg1_raw = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 0) | 314 FIELD_PREP(CS_ACTIVE_BSY, 0) | 315 FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, 17) | 316 FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 1) | 317 FIELD_PREP(WR_RD_BSY_GAP_MASK, 20) | 318 FIELD_PREP(WIDE_FLASH, 0) | 319 FIELD_PREP(DEV0_CFG1_ECC_DISABLE, 1); 320 321 ecc_cfg->ecc_bch_cfg = FIELD_PREP(ECC_CFG_ECC_DISABLE, !ecc_cfg->bch_enabled) | 322 FIELD_PREP(ECC_SW_RESET, 0) | 323 FIELD_PREP(ECC_NUM_DATA_BYTES_MASK, 
ecc_cfg->cw_data) | 324 FIELD_PREP(ECC_FORCE_CLK_OPEN, 1) | 325 FIELD_PREP(ECC_MODE_MASK, 0) | 326 FIELD_PREP(ECC_PARITY_SIZE_BYTES_BCH_MASK, ecc_cfg->ecc_bytes_hw); 327 328 ecc_cfg->ecc_buf_cfg = 0x203 << NUM_STEPS; 329 ecc_cfg->clrflashstatus = FS_READY_BSY_N; 330 ecc_cfg->clrreadstatus = 0xc0; 331 332 conf->step_size = ecc_cfg->step_size; 333 conf->strength = ecc_cfg->strength; 334 335 snandc->regs->erased_cw_detect_cfg_clr = cpu_to_le32(CLR_ERASED_PAGE_DET); 336 snandc->regs->erased_cw_detect_cfg_set = cpu_to_le32(SET_ERASED_PAGE_DET); 337 338 dev_dbg(snandc->dev, "ECC strength: %u bits per %u bytes\n", 339 ecc_cfg->strength, ecc_cfg->step_size); 340 341 return 0; 342 } 343 344 static void qcom_spi_ecc_cleanup_ctx_pipelined(struct nand_device *nand) 345 { 346 struct qpic_ecc *ecc_cfg = nand_to_ecc_ctx(nand); 347 348 kfree(ecc_cfg); 349 } 350 351 static int qcom_spi_ecc_prepare_io_req_pipelined(struct nand_device *nand, 352 struct nand_page_io_req *req) 353 { 354 struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand); 355 struct qpic_ecc *ecc_cfg = nand_to_ecc_ctx(nand); 356 357 snandc->qspi->ecc = ecc_cfg; 358 snandc->qspi->raw_rw = false; 359 snandc->qspi->oob_rw = false; 360 snandc->qspi->page_rw = false; 361 362 if (req->datalen) 363 snandc->qspi->page_rw = true; 364 365 if (req->ooblen) 366 snandc->qspi->oob_rw = true; 367 368 if (req->mode == MTD_OPS_RAW) 369 snandc->qspi->raw_rw = true; 370 371 return 0; 372 } 373 374 static int qcom_spi_ecc_finish_io_req_pipelined(struct nand_device *nand, 375 struct nand_page_io_req *req) 376 { 377 struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand); 378 struct mtd_info *mtd = nanddev_to_mtd(nand); 379 380 if (req->mode == MTD_OPS_RAW || req->type != NAND_PAGE_READ) 381 return 0; 382 383 if (snandc->qspi->ecc_stats.failed) 384 mtd->ecc_stats.failed += snandc->qspi->ecc_stats.failed; 385 else 386 mtd->ecc_stats.corrected += snandc->qspi->ecc_stats.corrected; 387 388 if (snandc->qspi->ecc_stats.failed) 
389 return -EBADMSG; 390 else 391 return snandc->qspi->ecc_stats.bitflips; 392 } 393 394 static struct nand_ecc_engine_ops qcom_spi_ecc_engine_ops_pipelined = { 395 .init_ctx = qcom_spi_ecc_init_ctx_pipelined, 396 .cleanup_ctx = qcom_spi_ecc_cleanup_ctx_pipelined, 397 .prepare_io_req = qcom_spi_ecc_prepare_io_req_pipelined, 398 .finish_io_req = qcom_spi_ecc_finish_io_req_pipelined, 399 }; 400 401 /* helper to configure location register values */ 402 static void qcom_spi_set_read_loc(struct qcom_nand_controller *snandc, int cw, int reg, 403 int cw_offset, int read_size, int is_last_read_loc) 404 { 405 int reg_base = NAND_READ_LOCATION_0; 406 int num_cw = snandc->qspi->num_cw; 407 408 if (cw == (num_cw - 1)) 409 reg_base = NAND_READ_LOCATION_LAST_CW_0; 410 411 reg_base += reg * 4; 412 413 if (cw == (num_cw - 1)) 414 return qcom_spi_set_read_loc_last(snandc, reg_base, cw_offset, 415 read_size, is_last_read_loc); 416 else 417 return qcom_spi_set_read_loc_first(snandc, reg_base, cw_offset, 418 read_size, is_last_read_loc); 419 } 420 421 static void 422 qcom_spi_config_cw_read(struct qcom_nand_controller *snandc, bool use_ecc, int cw) 423 { 424 __le32 *reg = &snandc->regs->read_location0; 425 int num_cw = snandc->qspi->num_cw; 426 427 qcom_write_reg_dma(snandc, reg, NAND_READ_LOCATION_0, 4, NAND_BAM_NEXT_SGL); 428 if (cw == (num_cw - 1)) { 429 reg = &snandc->regs->read_location_last0; 430 qcom_write_reg_dma(snandc, reg, NAND_READ_LOCATION_LAST_CW_0, 4, 431 NAND_BAM_NEXT_SGL); 432 } 433 434 qcom_write_reg_dma(snandc, &snandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL); 435 qcom_write_reg_dma(snandc, &snandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL); 436 437 qcom_read_reg_dma(snandc, NAND_FLASH_STATUS, 2, 0); 438 qcom_read_reg_dma(snandc, NAND_ERASED_CW_DETECT_STATUS, 1, 439 NAND_BAM_NEXT_SGL); 440 } 441 442 static int qcom_spi_block_erase(struct qcom_nand_controller *snandc) 443 { 444 struct qpic_ecc *ecc_cfg = snandc->qspi->ecc; 445 int ret; 446 447 
snandc->buf_count = 0; 448 snandc->buf_start = 0; 449 qcom_clear_read_regs(snandc); 450 qcom_clear_bam_transaction(snandc); 451 452 snandc->regs->cmd = snandc->qspi->cmd; 453 snandc->regs->addr0 = snandc->qspi->addr1; 454 snandc->regs->addr1 = snandc->qspi->addr2; 455 snandc->regs->cfg0 = cpu_to_le32(ecc_cfg->cfg0_raw & ~(7 << CW_PER_PAGE)); 456 snandc->regs->cfg1 = cpu_to_le32(ecc_cfg->cfg1_raw); 457 snandc->regs->exec = cpu_to_le32(1); 458 459 qcom_write_reg_dma(snandc, &snandc->regs->cmd, NAND_FLASH_CMD, 3, NAND_BAM_NEXT_SGL); 460 qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL); 461 qcom_write_reg_dma(snandc, &snandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL); 462 463 ret = qcom_submit_descs(snandc); 464 if (ret) { 465 dev_err(snandc->dev, "failure to erase block\n"); 466 return ret; 467 } 468 469 return 0; 470 } 471 472 static void qcom_spi_config_single_cw_page_read(struct qcom_nand_controller *snandc, 473 bool use_ecc, int cw) 474 { 475 __le32 *reg = &snandc->regs->read_location0; 476 int num_cw = snandc->qspi->num_cw; 477 478 qcom_write_reg_dma(snandc, &snandc->regs->addr0, NAND_ADDR0, 2, 0); 479 qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0); 480 qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_clr, 481 NAND_ERASED_CW_DETECT_CFG, 1, 0); 482 qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_set, 483 NAND_ERASED_CW_DETECT_CFG, 1, 484 NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL); 485 486 if (cw == (num_cw - 1)) { 487 reg = &snandc->regs->read_location_last0; 488 qcom_write_reg_dma(snandc, reg, NAND_READ_LOCATION_LAST_CW_0, 4, NAND_BAM_NEXT_SGL); 489 } 490 qcom_write_reg_dma(snandc, &snandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL); 491 qcom_write_reg_dma(snandc, &snandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL); 492 493 qcom_read_reg_dma(snandc, NAND_FLASH_STATUS, 1, 0); 494 } 495 496 static int qcom_spi_read_last_cw(struct qcom_nand_controller *snandc, 
497 const struct spi_mem_op *op) 498 { 499 struct qpic_ecc *ecc_cfg = snandc->qspi->ecc; 500 struct mtd_info *mtd = snandc->qspi->mtd; 501 int size, ret = 0; 502 int col, bbpos; 503 u32 cfg0, cfg1, ecc_bch_cfg; 504 u32 num_cw = snandc->qspi->num_cw; 505 506 qcom_clear_bam_transaction(snandc); 507 qcom_clear_read_regs(snandc); 508 509 size = ecc_cfg->cw_size; 510 col = ecc_cfg->cw_size * (num_cw - 1); 511 512 memset(snandc->data_buffer, 0xff, size); 513 snandc->regs->addr0 = (snandc->qspi->addr1 | cpu_to_le32(col)); 514 snandc->regs->addr1 = snandc->qspi->addr2; 515 516 cfg0 = (ecc_cfg->cfg0_raw & ~(7U << CW_PER_PAGE)) | 517 0 << CW_PER_PAGE; 518 cfg1 = ecc_cfg->cfg1_raw; 519 ecc_bch_cfg = ECC_CFG_ECC_DISABLE; 520 521 snandc->regs->cmd = snandc->qspi->cmd; 522 snandc->regs->cfg0 = cpu_to_le32(cfg0); 523 snandc->regs->cfg1 = cpu_to_le32(cfg1); 524 snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg); 525 snandc->regs->clrflashstatus = cpu_to_le32(ecc_cfg->clrflashstatus); 526 snandc->regs->clrreadstatus = cpu_to_le32(ecc_cfg->clrreadstatus); 527 snandc->regs->exec = cpu_to_le32(1); 528 529 qcom_spi_set_read_loc(snandc, num_cw - 1, 0, 0, ecc_cfg->cw_size, 1); 530 531 qcom_spi_config_single_cw_page_read(snandc, false, num_cw - 1); 532 533 qcom_read_data_dma(snandc, FLASH_BUF_ACC, snandc->data_buffer, size, 0); 534 535 ret = qcom_submit_descs(snandc); 536 if (ret) { 537 dev_err(snandc->dev, "failed to read last cw\n"); 538 return ret; 539 } 540 541 qcom_nandc_dev_to_mem(snandc, true); 542 u32 flash = le32_to_cpu(snandc->reg_read_buf[0]); 543 544 if (flash & (FS_OP_ERR | FS_MPU_ERR)) 545 return -EIO; 546 547 bbpos = mtd->writesize - ecc_cfg->cw_size * (num_cw - 1); 548 549 if (snandc->data_buffer[bbpos] == 0xff) 550 snandc->data_buffer[bbpos + 1] = 0xff; 551 if (snandc->data_buffer[bbpos] != 0xff) 552 snandc->data_buffer[bbpos + 1] = snandc->data_buffer[bbpos]; 553 554 memcpy(op->data.buf.in, snandc->data_buffer + bbpos, op->data.nbytes); 555 556 return ret; 557 } 558 
/*
 * Parse the per-codeword status words captured during the previous page read
 * and fold them into snandc->qspi->ecc_stats.
 *
 * Returns 0 on success (stats updated; uncorrectable CWs bump .failed),
 * or -EIO when the controller flagged an operational/MPU error.
 *
 * NOTE(review): @data_buf and @oob_buf are only advanced here, never
 * dereferenced — presumably kept for parity with the raw-NAND variant;
 * confirm before removing.
 */
static int qcom_spi_check_error(struct qcom_nand_controller *snandc, u8 *data_buf, u8 *oob_buf)
{
	struct snandc_read_status *buf;
	struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
	int i, num_cw = snandc->qspi->num_cw;
	bool flash_op_err = false, erased;
	unsigned int max_bitflips = 0;
	unsigned int uncorrectable_cws = 0;

	snandc->qspi->ecc_stats.failed = 0;
	snandc->qspi->ecc_stats.corrected = 0;

	/* Sync the DMA-captured register reads, then view them as
	 * (flash, buffer, erased_cw) triplets, one per codeword. */
	qcom_nandc_dev_to_mem(snandc, true);
	buf = (struct snandc_read_status *)snandc->reg_read_buf;

	for (i = 0; i < num_cw; i++, buf++) {
		u32 flash, buffer, erased_cw;
		int data_len, oob_len;

		/* The last codeword carries less data and all the free OOB */
		if (i == (num_cw - 1)) {
			data_len = NANDC_STEP_SIZE - ((num_cw - 1) << 2);
			oob_len = num_cw << 2;
		} else {
			data_len = ecc_cfg->cw_data;
			oob_len = 0;
		}

		flash = le32_to_cpu(buf->snandc_flash);
		buffer = le32_to_cpu(buf->snandc_buffer);
		erased_cw = le32_to_cpu(buf->snandc_erased_cw);

		if ((flash & FS_OP_ERR) && (buffer & BS_UNCORRECTABLE_BIT)) {
			/* Uncorrectable — unless the HW erased-page detector
			 * says this codeword is genuinely erased (BCH only). */
			if (ecc_cfg->bch_enabled)
				erased = (erased_cw & ERASED_CW) == ERASED_CW;
			else
				erased = false;

			if (!erased)
				uncorrectable_cws |= BIT(i);

		} else if (flash & (FS_OP_ERR | FS_MPU_ERR)) {
			flash_op_err = true;
		} else {
			/* Healthy codeword: accumulate corrected bitflips */
			unsigned int stat;

			stat = buffer & BS_CORRECTABLE_ERR_MSK;
			snandc->qspi->ecc_stats.corrected += stat;
			max_bitflips = max(max_bitflips, stat);
		}

		if (data_buf)
			data_buf += data_len;
		if (oob_buf)
			oob_buf += oob_len + ecc_cfg->bytes;
	}

	if (flash_op_err)
		return -EIO;

	if (!uncorrectable_cws)
		snandc->qspi->ecc_stats.bitflips = max_bitflips;
	else
		/* One failure per page, regardless of how many CWs failed */
		snandc->qspi->ecc_stats.failed++;

	return 0;
}

/*
 * Check the raw (ECC-bypassed) flash status words for the first @cw_cnt
 * codewords; returns -EIO on any operational/MPU error, 0 otherwise.
 */
static int qcom_spi_check_raw_flash_errors(struct qcom_nand_controller *snandc, int cw_cnt)
{
	int i;

	qcom_nandc_dev_to_mem(snandc, true);

	for (i = 0; i < cw_cnt; i++) {
		u32 flash =
le32_to_cpu(snandc->reg_read_buf[i]);

		if (flash & (FS_OP_ERR | FS_MPU_ERR))
			return -EIO;
	}

	return 0;
}

/*
 * Read one codeword @cw of the current page in raw mode (ECC disabled),
 * splitting the transfer into data1/bbm/data2/oob chunks via the four
 * read-location registers.
 *
 * NOTE(review): the DMA descriptor order below (addr regs, cfg regs,
 * erased-CW detect clr/set, read locations, cmd/exec) is part of the
 * controller protocol — do not reorder.
 */
static int qcom_spi_read_cw_raw(struct qcom_nand_controller *snandc, u8 *data_buf,
				u8 *oob_buf, int cw)
{
	struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
	struct mtd_info *mtd = snandc->qspi->mtd;
	int data_size1, data_size2, oob_size1, oob_size2;
	int ret, reg_off = FLASH_BUF_ACC, read_loc = 0;
	int raw_cw = cw;
	u32 cfg0, cfg1, ecc_bch_cfg, num_cw = snandc->qspi->num_cw;
	int col;

	snandc->buf_count = 0;
	snandc->buf_start = 0;
	qcom_clear_read_regs(snandc);
	qcom_clear_bam_transaction(snandc);
	/* Always program the last-CW register bank for raw single-CW reads */
	raw_cw = num_cw - 1;

	/* Single codeword per operation: force CW_PER_PAGE to 0 */
	cfg0 = (ecc_cfg->cfg0_raw & ~(7U << CW_PER_PAGE)) |
	       0 << CW_PER_PAGE;
	cfg1 = ecc_cfg->cfg1_raw;
	ecc_bch_cfg = ECC_CFG_ECC_DISABLE;

	/* Column address of codeword @cw within the page */
	col = ecc_cfg->cw_size * cw;

	snandc->regs->addr0 = (snandc->qspi->addr1 | cpu_to_le32(col));
	snandc->regs->addr1 = snandc->qspi->addr2;
	snandc->regs->cmd = snandc->qspi->cmd;
	snandc->regs->cfg0 = cpu_to_le32(cfg0);
	snandc->regs->cfg1 = cpu_to_le32(cfg1);
	snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg);
	snandc->regs->clrflashstatus = cpu_to_le32(ecc_cfg->clrflashstatus);
	snandc->regs->clrreadstatus = cpu_to_le32(ecc_cfg->clrreadstatus);
	snandc->regs->exec = cpu_to_le32(1);

	qcom_spi_set_read_loc(snandc, raw_cw, 0, 0, ecc_cfg->cw_size, 1);

	qcom_write_reg_dma(snandc, &snandc->regs->addr0, NAND_ADDR0, 2, 0);
	qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
	qcom_write_reg_dma(snandc, &snandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1, 0);

	qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_clr,
			   NAND_ERASED_CW_DETECT_CFG, 1, 0);
	qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_set,
			   NAND_ERASED_CW_DETECT_CFG, 1,
			   NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);

	/* First data chunk is what remains of the page after (num_cw-1)
	 * full codewords; it is followed by the bad-block-marker bytes. */
	data_size1 = mtd->writesize - ecc_cfg->cw_size * (num_cw - 1);
	oob_size1 = ecc_cfg->bbm_size;

	if (cw == (num_cw - 1)) {
		data_size2 = NANDC_STEP_SIZE - data_size1 -
			     ((num_cw - 1) * 4);
		oob_size2 = (num_cw * 4) + ecc_cfg->ecc_bytes_hw +
			    ecc_cfg->spare_bytes;
	} else {
		data_size2 = ecc_cfg->cw_data - data_size1;
		oob_size2 = ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes;
	}

	/* Four read locations: data1 | bbm | data2 | oob (last marked) */
	qcom_spi_set_read_loc(snandc, cw, 0, read_loc, data_size1, 0);
	read_loc += data_size1;

	qcom_spi_set_read_loc(snandc, cw, 1, read_loc, oob_size1, 0);
	read_loc += oob_size1;

	qcom_spi_set_read_loc(snandc, cw, 2, read_loc, data_size2, 0);
	read_loc += data_size2;

	qcom_spi_set_read_loc(snandc, cw, 3, read_loc, oob_size2, 1);

	qcom_spi_config_cw_read(snandc, false, raw_cw);

	/* Drain the flash buffer in the same chunk order */
	qcom_read_data_dma(snandc, reg_off, data_buf, data_size1, 0);
	reg_off += data_size1;

	qcom_read_data_dma(snandc, reg_off, oob_buf, oob_size1, 0);
	reg_off += oob_size1;

	qcom_read_data_dma(snandc, reg_off, data_buf + data_size1, data_size2, 0);
	reg_off += data_size2;

	qcom_read_data_dma(snandc, reg_off, oob_buf + oob_size1, oob_size2, 0);

	ret = qcom_submit_descs(snandc);
	if (ret) {
		dev_err(snandc->dev, "failure to read raw cw %d\n", cw);
		return ret;
	}

	return qcom_spi_check_raw_flash_errors(snandc, 1);
}

/*
 * Raw page read: iterate all codewords through qcom_spi_read_cw_raw().
 * Data goes to the caller's buffer (when this is a page read); OOB is
 * staged into the driver's oob_buf.
 */
static int qcom_spi_read_page_raw(struct qcom_nand_controller *snandc,
				  const struct spi_mem_op *op)
{
	struct qpic_ecc *ecc_cfg = snandc->qspi->ecc;
	u8 *data_buf = NULL, *oob_buf = NULL;
	int ret, cw;
	u32 num_cw = snandc->qspi->num_cw;

	if (snandc->qspi->page_rw)
		data_buf = op->data.buf.in;

	oob_buf = snandc->qspi->oob_buf;
	memset(oob_buf, 0xff, OOB_BUF_SIZE);

	for (cw = 0; cw < num_cw; cw++) {
		ret = qcom_spi_read_cw_raw(snandc, data_buf, oob_buf, cw);
		if (ret)
			return ret;

		if (data_buf)
			data_buf +=
ecc_cfg->cw_data; 755 if (oob_buf) 756 oob_buf += ecc_cfg->bytes; 757 } 758 759 return 0; 760 } 761 762 static int qcom_spi_read_page_ecc(struct qcom_nand_controller *snandc, 763 const struct spi_mem_op *op) 764 { 765 struct qpic_ecc *ecc_cfg = snandc->qspi->ecc; 766 u8 *data_buf = NULL, *data_buf_start, *oob_buf = NULL, *oob_buf_start; 767 int ret, i; 768 u32 cfg0, cfg1, ecc_bch_cfg, num_cw = snandc->qspi->num_cw; 769 770 data_buf = op->data.buf.in; 771 data_buf_start = data_buf; 772 773 oob_buf = snandc->qspi->oob_buf; 774 oob_buf_start = oob_buf; 775 776 snandc->buf_count = 0; 777 snandc->buf_start = 0; 778 qcom_clear_read_regs(snandc); 779 780 cfg0 = (ecc_cfg->cfg0 & ~(7U << CW_PER_PAGE)) | 781 (num_cw - 1) << CW_PER_PAGE; 782 cfg1 = ecc_cfg->cfg1; 783 ecc_bch_cfg = ecc_cfg->ecc_bch_cfg; 784 785 snandc->regs->addr0 = snandc->qspi->addr1; 786 snandc->regs->addr1 = snandc->qspi->addr2; 787 snandc->regs->cmd = snandc->qspi->cmd; 788 snandc->regs->cfg0 = cpu_to_le32(cfg0); 789 snandc->regs->cfg1 = cpu_to_le32(cfg1); 790 snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg); 791 snandc->regs->clrflashstatus = cpu_to_le32(ecc_cfg->clrflashstatus); 792 snandc->regs->clrreadstatus = cpu_to_le32(ecc_cfg->clrreadstatus); 793 snandc->regs->exec = cpu_to_le32(1); 794 795 qcom_spi_set_read_loc(snandc, 0, 0, 0, ecc_cfg->cw_data, 1); 796 797 qcom_clear_bam_transaction(snandc); 798 799 qcom_write_reg_dma(snandc, &snandc->regs->addr0, NAND_ADDR0, 2, 0); 800 qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0); 801 qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_clr, 802 NAND_ERASED_CW_DETECT_CFG, 1, 0); 803 qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_set, 804 NAND_ERASED_CW_DETECT_CFG, 1, 805 NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL); 806 807 for (i = 0; i < num_cw; i++) { 808 int data_size, oob_size; 809 810 if (i == (num_cw - 1)) { 811 data_size = 512 - ((num_cw - 1) << 2); 812 oob_size = (num_cw << 2) + ecc_cfg->ecc_bytes_hw + 
813 ecc_cfg->spare_bytes; 814 } else { 815 data_size = ecc_cfg->cw_data; 816 oob_size = ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes; 817 } 818 819 if (data_buf && oob_buf) { 820 qcom_spi_set_read_loc(snandc, i, 0, 0, data_size, 0); 821 qcom_spi_set_read_loc(snandc, i, 1, data_size, oob_size, 1); 822 } else if (data_buf) { 823 qcom_spi_set_read_loc(snandc, i, 0, 0, data_size, 1); 824 } else { 825 qcom_spi_set_read_loc(snandc, i, 0, data_size, oob_size, 1); 826 } 827 828 qcom_spi_config_cw_read(snandc, true, i); 829 830 if (data_buf) 831 qcom_read_data_dma(snandc, FLASH_BUF_ACC, data_buf, 832 data_size, 0); 833 if (oob_buf) { 834 int j; 835 836 for (j = 0; j < ecc_cfg->bbm_size; j++) 837 *oob_buf++ = 0xff; 838 839 qcom_read_data_dma(snandc, FLASH_BUF_ACC + data_size, 840 oob_buf, oob_size, 0); 841 } 842 843 if (data_buf) 844 data_buf += data_size; 845 if (oob_buf) 846 oob_buf += oob_size; 847 } 848 849 ret = qcom_submit_descs(snandc); 850 if (ret) { 851 dev_err(snandc->dev, "failure to read page\n"); 852 return ret; 853 } 854 855 return qcom_spi_check_error(snandc, data_buf_start, oob_buf_start); 856 } 857 858 static int qcom_spi_read_page_oob(struct qcom_nand_controller *snandc, 859 const struct spi_mem_op *op) 860 { 861 struct qpic_ecc *ecc_cfg = snandc->qspi->ecc; 862 u8 *data_buf = NULL, *data_buf_start, *oob_buf = NULL, *oob_buf_start; 863 int ret, i; 864 u32 cfg0, cfg1, ecc_bch_cfg, num_cw = snandc->qspi->num_cw; 865 866 oob_buf = op->data.buf.in; 867 oob_buf_start = oob_buf; 868 869 data_buf_start = data_buf; 870 871 snandc->buf_count = 0; 872 snandc->buf_start = 0; 873 qcom_clear_read_regs(snandc); 874 qcom_clear_bam_transaction(snandc); 875 876 cfg0 = (ecc_cfg->cfg0 & ~(7U << CW_PER_PAGE)) | 877 (num_cw - 1) << CW_PER_PAGE; 878 cfg1 = ecc_cfg->cfg1; 879 ecc_bch_cfg = ecc_cfg->ecc_bch_cfg; 880 881 snandc->regs->addr0 = snandc->qspi->addr1; 882 snandc->regs->addr1 = snandc->qspi->addr2; 883 snandc->regs->cmd = snandc->qspi->cmd; 884 snandc->regs->cfg0 = 
cpu_to_le32(cfg0); 885 snandc->regs->cfg1 = cpu_to_le32(cfg1); 886 snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg); 887 snandc->regs->clrflashstatus = cpu_to_le32(ecc_cfg->clrflashstatus); 888 snandc->regs->clrreadstatus = cpu_to_le32(ecc_cfg->clrreadstatus); 889 snandc->regs->exec = cpu_to_le32(1); 890 891 qcom_spi_set_read_loc(snandc, 0, 0, 0, ecc_cfg->cw_data, 1); 892 893 qcom_write_reg_dma(snandc, &snandc->regs->addr0, NAND_ADDR0, 2, 0); 894 qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0); 895 qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_clr, 896 NAND_ERASED_CW_DETECT_CFG, 1, 0); 897 qcom_write_reg_dma(snandc, &snandc->regs->erased_cw_detect_cfg_set, 898 NAND_ERASED_CW_DETECT_CFG, 1, 899 NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL); 900 901 for (i = 0; i < num_cw; i++) { 902 int data_size, oob_size; 903 904 if (i == (num_cw - 1)) { 905 data_size = NANDC_STEP_SIZE - ((num_cw - 1) << 2); 906 oob_size = (num_cw << 2) + ecc_cfg->ecc_bytes_hw + 907 ecc_cfg->spare_bytes; 908 } else { 909 data_size = ecc_cfg->cw_data; 910 oob_size = ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes; 911 } 912 913 qcom_spi_set_read_loc(snandc, i, 0, data_size, oob_size, 1); 914 915 qcom_spi_config_cw_read(snandc, true, i); 916 917 if (oob_buf) { 918 int j; 919 920 for (j = 0; j < ecc_cfg->bbm_size; j++) 921 *oob_buf++ = 0xff; 922 923 qcom_read_data_dma(snandc, FLASH_BUF_ACC + data_size, 924 oob_buf, oob_size, 0); 925 } 926 927 if (oob_buf) 928 oob_buf += oob_size; 929 } 930 931 ret = qcom_submit_descs(snandc); 932 if (ret) { 933 dev_err(snandc->dev, "failure to read oob\n"); 934 return ret; 935 } 936 937 return qcom_spi_check_error(snandc, data_buf_start, oob_buf_start); 938 } 939 940 static int qcom_spi_read_page(struct qcom_nand_controller *snandc, 941 const struct spi_mem_op *op) 942 { 943 if (snandc->qspi->page_rw && snandc->qspi->raw_rw) 944 return qcom_spi_read_page_raw(snandc, op); 945 946 if (snandc->qspi->page_rw) 947 return 
qcom_spi_read_page_ecc(snandc, op); 948 949 if (snandc->qspi->oob_rw && snandc->qspi->raw_rw) 950 return qcom_spi_read_last_cw(snandc, op); 951 952 if (snandc->qspi->oob_rw) 953 return qcom_spi_read_page_oob(snandc, op); 954 955 return 0; 956 } 957 958 static void qcom_spi_config_page_write(struct qcom_nand_controller *snandc) 959 { 960 qcom_write_reg_dma(snandc, &snandc->regs->addr0, NAND_ADDR0, 2, 0); 961 qcom_write_reg_dma(snandc, &snandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0); 962 qcom_write_reg_dma(snandc, &snandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 963 1, NAND_BAM_NEXT_SGL); 964 } 965 966 static void qcom_spi_config_cw_write(struct qcom_nand_controller *snandc) 967 { 968 qcom_write_reg_dma(snandc, &snandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL); 969 qcom_write_reg_dma(snandc, &snandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL); 970 qcom_read_reg_dma(snandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL); 971 972 qcom_write_reg_dma(snandc, &snandc->regs->clrflashstatus, NAND_FLASH_STATUS, 1, 0); 973 qcom_write_reg_dma(snandc, &snandc->regs->clrreadstatus, NAND_READ_STATUS, 1, 974 NAND_BAM_NEXT_SGL); 975 } 976 977 static int qcom_spi_program_raw(struct qcom_nand_controller *snandc, 978 const struct spi_mem_op *op) 979 { 980 struct qpic_ecc *ecc_cfg = snandc->qspi->ecc; 981 struct mtd_info *mtd = snandc->qspi->mtd; 982 u8 *data_buf = NULL, *oob_buf = NULL; 983 int i, ret; 984 int num_cw = snandc->qspi->num_cw; 985 u32 cfg0, cfg1, ecc_bch_cfg; 986 987 cfg0 = (ecc_cfg->cfg0_raw & ~(7U << CW_PER_PAGE)) | 988 (num_cw - 1) << CW_PER_PAGE; 989 cfg1 = ecc_cfg->cfg1_raw; 990 ecc_bch_cfg = ECC_CFG_ECC_DISABLE; 991 992 data_buf = snandc->qspi->data_buf; 993 994 oob_buf = snandc->qspi->oob_buf; 995 memset(oob_buf, 0xff, OOB_BUF_SIZE); 996 997 snandc->buf_count = 0; 998 snandc->buf_start = 0; 999 qcom_clear_read_regs(snandc); 1000 qcom_clear_bam_transaction(snandc); 1001 1002 snandc->regs->addr0 = snandc->qspi->addr1; 1003 snandc->regs->addr1 = 
snandc->qspi->addr2; 1004 snandc->regs->cmd = snandc->qspi->cmd; 1005 snandc->regs->cfg0 = cpu_to_le32(cfg0); 1006 snandc->regs->cfg1 = cpu_to_le32(cfg1); 1007 snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg); 1008 snandc->regs->clrflashstatus = cpu_to_le32(ecc_cfg->clrflashstatus); 1009 snandc->regs->clrreadstatus = cpu_to_le32(ecc_cfg->clrreadstatus); 1010 snandc->regs->exec = cpu_to_le32(1); 1011 1012 qcom_spi_config_page_write(snandc); 1013 1014 for (i = 0; i < num_cw; i++) { 1015 int data_size1, data_size2, oob_size1, oob_size2; 1016 int reg_off = FLASH_BUF_ACC; 1017 1018 data_size1 = mtd->writesize - ecc_cfg->cw_size * (num_cw - 1); 1019 oob_size1 = ecc_cfg->bbm_size; 1020 1021 if (i == (num_cw - 1)) { 1022 data_size2 = NANDC_STEP_SIZE - data_size1 - 1023 ((num_cw - 1) << 2); 1024 oob_size2 = (num_cw << 2) + ecc_cfg->ecc_bytes_hw + 1025 ecc_cfg->spare_bytes; 1026 } else { 1027 data_size2 = ecc_cfg->cw_data - data_size1; 1028 oob_size2 = ecc_cfg->ecc_bytes_hw + ecc_cfg->spare_bytes; 1029 } 1030 1031 qcom_write_data_dma(snandc, reg_off, data_buf, data_size1, 1032 NAND_BAM_NO_EOT); 1033 reg_off += data_size1; 1034 data_buf += data_size1; 1035 1036 qcom_write_data_dma(snandc, reg_off, oob_buf, oob_size1, 1037 NAND_BAM_NO_EOT); 1038 oob_buf += oob_size1; 1039 reg_off += oob_size1; 1040 1041 qcom_write_data_dma(snandc, reg_off, data_buf, data_size2, 1042 NAND_BAM_NO_EOT); 1043 reg_off += data_size2; 1044 data_buf += data_size2; 1045 1046 qcom_write_data_dma(snandc, reg_off, oob_buf, oob_size2, 0); 1047 oob_buf += oob_size2; 1048 1049 qcom_spi_config_cw_write(snandc); 1050 } 1051 1052 ret = qcom_submit_descs(snandc); 1053 if (ret) { 1054 dev_err(snandc->dev, "failure to write raw page\n"); 1055 return ret; 1056 } 1057 1058 return 0; 1059 } 1060 1061 static int qcom_spi_program_ecc(struct qcom_nand_controller *snandc, 1062 const struct spi_mem_op *op) 1063 { 1064 struct qpic_ecc *ecc_cfg = snandc->qspi->ecc; 1065 u8 *data_buf = NULL, *oob_buf = NULL; 1066 int i, 
ret; 1067 int num_cw = snandc->qspi->num_cw; 1068 u32 cfg0, cfg1, ecc_bch_cfg, ecc_buf_cfg; 1069 1070 cfg0 = (ecc_cfg->cfg0 & ~(7U << CW_PER_PAGE)) | 1071 (num_cw - 1) << CW_PER_PAGE; 1072 cfg1 = ecc_cfg->cfg1; 1073 ecc_bch_cfg = ecc_cfg->ecc_bch_cfg; 1074 ecc_buf_cfg = ecc_cfg->ecc_buf_cfg; 1075 1076 if (snandc->qspi->data_buf) 1077 data_buf = snandc->qspi->data_buf; 1078 1079 oob_buf = snandc->qspi->oob_buf; 1080 1081 snandc->buf_count = 0; 1082 snandc->buf_start = 0; 1083 qcom_clear_read_regs(snandc); 1084 qcom_clear_bam_transaction(snandc); 1085 1086 snandc->regs->addr0 = snandc->qspi->addr1; 1087 snandc->regs->addr1 = snandc->qspi->addr2; 1088 snandc->regs->cmd = snandc->qspi->cmd; 1089 snandc->regs->cfg0 = cpu_to_le32(cfg0); 1090 snandc->regs->cfg1 = cpu_to_le32(cfg1); 1091 snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg); 1092 snandc->regs->ecc_buf_cfg = cpu_to_le32(ecc_buf_cfg); 1093 snandc->regs->exec = cpu_to_le32(1); 1094 1095 qcom_spi_config_page_write(snandc); 1096 1097 for (i = 0; i < num_cw; i++) { 1098 int data_size, oob_size; 1099 1100 if (i == (num_cw - 1)) { 1101 data_size = NANDC_STEP_SIZE - ((num_cw - 1) << 2); 1102 oob_size = (num_cw << 2) + ecc_cfg->ecc_bytes_hw + 1103 ecc_cfg->spare_bytes; 1104 } else { 1105 data_size = ecc_cfg->cw_data; 1106 oob_size = ecc_cfg->bytes; 1107 } 1108 1109 if (data_buf) 1110 qcom_write_data_dma(snandc, FLASH_BUF_ACC, data_buf, data_size, 1111 i == (num_cw - 1) ? 
NAND_BAM_NO_EOT : 0); 1112 1113 if (i == (num_cw - 1)) { 1114 if (oob_buf) { 1115 oob_buf += ecc_cfg->bbm_size; 1116 qcom_write_data_dma(snandc, FLASH_BUF_ACC + data_size, 1117 oob_buf, oob_size, 0); 1118 } 1119 } 1120 1121 qcom_spi_config_cw_write(snandc); 1122 1123 if (data_buf) 1124 data_buf += data_size; 1125 if (oob_buf) 1126 oob_buf += oob_size; 1127 } 1128 1129 ret = qcom_submit_descs(snandc); 1130 if (ret) { 1131 dev_err(snandc->dev, "failure to write page\n"); 1132 return ret; 1133 } 1134 1135 return 0; 1136 } 1137 1138 static int qcom_spi_program_oob(struct qcom_nand_controller *snandc, 1139 const struct spi_mem_op *op) 1140 { 1141 struct qpic_ecc *ecc_cfg = snandc->qspi->ecc; 1142 u8 *oob_buf = NULL; 1143 int ret, col, data_size, oob_size; 1144 int num_cw = snandc->qspi->num_cw; 1145 u32 cfg0, cfg1, ecc_bch_cfg, ecc_buf_cfg; 1146 1147 cfg0 = (ecc_cfg->cfg0 & ~(7U << CW_PER_PAGE)) | 1148 (num_cw - 1) << CW_PER_PAGE; 1149 cfg1 = ecc_cfg->cfg1; 1150 ecc_bch_cfg = ecc_cfg->ecc_bch_cfg; 1151 ecc_buf_cfg = ecc_cfg->ecc_buf_cfg; 1152 1153 col = ecc_cfg->cw_size * (num_cw - 1); 1154 1155 oob_buf = snandc->qspi->data_buf; 1156 1157 snandc->buf_count = 0; 1158 snandc->buf_start = 0; 1159 qcom_clear_read_regs(snandc); 1160 qcom_clear_bam_transaction(snandc); 1161 snandc->regs->addr0 = (snandc->qspi->addr1 | cpu_to_le32(col)); 1162 snandc->regs->addr1 = snandc->qspi->addr2; 1163 snandc->regs->cmd = snandc->qspi->cmd; 1164 snandc->regs->cfg0 = cpu_to_le32(cfg0); 1165 snandc->regs->cfg1 = cpu_to_le32(cfg1); 1166 snandc->regs->ecc_bch_cfg = cpu_to_le32(ecc_bch_cfg); 1167 snandc->regs->ecc_buf_cfg = cpu_to_le32(ecc_buf_cfg); 1168 snandc->regs->exec = cpu_to_le32(1); 1169 1170 /* calculate the data and oob size for the last codeword/step */ 1171 data_size = NANDC_STEP_SIZE - ((num_cw - 1) << 2); 1172 oob_size = snandc->qspi->mtd->oobavail; 1173 1174 memset(snandc->data_buffer, 0xff, ecc_cfg->cw_data); 1175 /* override new oob content to last codeword */ 1176 
mtd_ooblayout_get_databytes(snandc->qspi->mtd, snandc->data_buffer + data_size, 1177 oob_buf, 0, snandc->qspi->mtd->oobavail); 1178 qcom_spi_config_page_write(snandc); 1179 qcom_write_data_dma(snandc, FLASH_BUF_ACC, snandc->data_buffer, data_size + oob_size, 0); 1180 qcom_spi_config_cw_write(snandc); 1181 1182 ret = qcom_submit_descs(snandc); 1183 if (ret) { 1184 dev_err(snandc->dev, "failure to write oob\n"); 1185 return ret; 1186 } 1187 1188 return 0; 1189 } 1190 1191 static int qcom_spi_program_execute(struct qcom_nand_controller *snandc, 1192 const struct spi_mem_op *op) 1193 { 1194 if (snandc->qspi->page_rw && snandc->qspi->raw_rw) 1195 return qcom_spi_program_raw(snandc, op); 1196 1197 if (snandc->qspi->page_rw) 1198 return qcom_spi_program_ecc(snandc, op); 1199 1200 if (snandc->qspi->oob_rw) 1201 return qcom_spi_program_oob(snandc, op); 1202 1203 return 0; 1204 } 1205 1206 static int qcom_spi_cmd_mapping(struct qcom_nand_controller *snandc, u32 opcode, u32 *cmd) 1207 { 1208 switch (opcode) { 1209 case SPINAND_RESET: 1210 *cmd = (SPI_WP | SPI_HOLD | SPI_TRANSFER_MODE_x1 | OP_RESET_DEVICE); 1211 break; 1212 case SPINAND_READID: 1213 *cmd = (SPI_WP | SPI_HOLD | SPI_TRANSFER_MODE_x1 | OP_FETCH_ID); 1214 break; 1215 case SPINAND_GET_FEATURE: 1216 *cmd = (SPI_TRANSFER_MODE_x1 | SPI_WP | SPI_HOLD | ACC_FEATURE); 1217 break; 1218 case SPINAND_SET_FEATURE: 1219 *cmd = (SPI_TRANSFER_MODE_x1 | SPI_WP | SPI_HOLD | ACC_FEATURE | 1220 QPIC_SET_FEATURE); 1221 break; 1222 case SPINAND_READ: 1223 if (snandc->qspi->raw_rw) { 1224 *cmd = (PAGE_ACC | LAST_PAGE | SPI_TRANSFER_MODE_x1 | 1225 SPI_WP | SPI_HOLD | OP_PAGE_READ); 1226 } else { 1227 *cmd = (PAGE_ACC | LAST_PAGE | SPI_TRANSFER_MODE_x1 | 1228 SPI_WP | SPI_HOLD | OP_PAGE_READ_WITH_ECC); 1229 } 1230 1231 break; 1232 case SPINAND_ERASE: 1233 *cmd = OP_BLOCK_ERASE | PAGE_ACC | LAST_PAGE | SPI_WP | 1234 SPI_HOLD | SPI_TRANSFER_MODE_x1; 1235 break; 1236 case SPINAND_WRITE_EN: 1237 *cmd = SPINAND_WRITE_EN; 1238 break; 1239 
case SPINAND_PROGRAM_EXECUTE: 1240 *cmd = (PAGE_ACC | LAST_PAGE | SPI_TRANSFER_MODE_x1 | 1241 SPI_WP | SPI_HOLD | OP_PROGRAM_PAGE); 1242 break; 1243 case SPINAND_PROGRAM_LOAD: 1244 *cmd = SPINAND_PROGRAM_LOAD; 1245 break; 1246 default: 1247 dev_err(snandc->dev, "Opcode not supported: %u\n", opcode); 1248 return -EOPNOTSUPP; 1249 } 1250 1251 return 0; 1252 } 1253 1254 static int qcom_spi_write_page(struct qcom_nand_controller *snandc, 1255 const struct spi_mem_op *op) 1256 { 1257 int ret; 1258 u32 cmd; 1259 1260 ret = qcom_spi_cmd_mapping(snandc, op->cmd.opcode, &cmd); 1261 if (ret < 0) 1262 return ret; 1263 1264 if (op->cmd.opcode == SPINAND_PROGRAM_LOAD) 1265 snandc->qspi->data_buf = (u8 *)op->data.buf.out; 1266 1267 return 0; 1268 } 1269 1270 static int qcom_spi_send_cmdaddr(struct qcom_nand_controller *snandc, 1271 const struct spi_mem_op *op) 1272 { 1273 struct qpic_snand_op s_op = {}; 1274 u32 cmd; 1275 int ret, opcode; 1276 1277 ret = qcom_spi_cmd_mapping(snandc, op->cmd.opcode, &cmd); 1278 if (ret < 0) 1279 return ret; 1280 1281 s_op.cmd_reg = cmd; 1282 s_op.addr1_reg = op->addr.val; 1283 s_op.addr2_reg = 0; 1284 1285 opcode = op->cmd.opcode; 1286 1287 switch (opcode) { 1288 case SPINAND_WRITE_EN: 1289 return 0; 1290 case SPINAND_PROGRAM_EXECUTE: 1291 s_op.addr1_reg = op->addr.val << 16; 1292 s_op.addr2_reg = op->addr.val >> 16 & 0xff; 1293 snandc->qspi->addr1 = cpu_to_le32(s_op.addr1_reg); 1294 snandc->qspi->addr2 = cpu_to_le32(s_op.addr2_reg); 1295 snandc->qspi->cmd = cpu_to_le32(cmd); 1296 return qcom_spi_program_execute(snandc, op); 1297 case SPINAND_READ: 1298 s_op.addr1_reg = (op->addr.val << 16); 1299 s_op.addr2_reg = op->addr.val >> 16 & 0xff; 1300 snandc->qspi->addr1 = cpu_to_le32(s_op.addr1_reg); 1301 snandc->qspi->addr2 = cpu_to_le32(s_op.addr2_reg); 1302 snandc->qspi->cmd = cpu_to_le32(cmd); 1303 return 0; 1304 case SPINAND_ERASE: 1305 s_op.addr2_reg = (op->addr.val >> 16) & 0xffff; 1306 s_op.addr1_reg = op->addr.val; 1307 snandc->qspi->addr1 = 
cpu_to_le32(s_op.addr1_reg << 16); 1308 snandc->qspi->addr2 = cpu_to_le32(s_op.addr2_reg); 1309 snandc->qspi->cmd = cpu_to_le32(cmd); 1310 qcom_spi_block_erase(snandc); 1311 return 0; 1312 default: 1313 break; 1314 } 1315 1316 snandc->buf_count = 0; 1317 snandc->buf_start = 0; 1318 qcom_clear_read_regs(snandc); 1319 qcom_clear_bam_transaction(snandc); 1320 1321 snandc->regs->cmd = cpu_to_le32(s_op.cmd_reg); 1322 snandc->regs->exec = cpu_to_le32(1); 1323 snandc->regs->addr0 = cpu_to_le32(s_op.addr1_reg); 1324 snandc->regs->addr1 = cpu_to_le32(s_op.addr2_reg); 1325 1326 qcom_write_reg_dma(snandc, &snandc->regs->cmd, NAND_FLASH_CMD, 3, NAND_BAM_NEXT_SGL); 1327 qcom_write_reg_dma(snandc, &snandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL); 1328 1329 ret = qcom_submit_descs(snandc); 1330 if (ret) 1331 dev_err(snandc->dev, "failure in submitting cmd descriptor\n"); 1332 1333 return ret; 1334 } 1335 1336 static int qcom_spi_io_op(struct qcom_nand_controller *snandc, const struct spi_mem_op *op) 1337 { 1338 int ret, val, opcode; 1339 bool copy = false, copy_ftr = false; 1340 1341 ret = qcom_spi_send_cmdaddr(snandc, op); 1342 if (ret) 1343 return ret; 1344 1345 snandc->buf_count = 0; 1346 snandc->buf_start = 0; 1347 qcom_clear_read_regs(snandc); 1348 qcom_clear_bam_transaction(snandc); 1349 opcode = op->cmd.opcode; 1350 1351 switch (opcode) { 1352 case SPINAND_READID: 1353 snandc->buf_count = 4; 1354 qcom_read_reg_dma(snandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL); 1355 copy = true; 1356 break; 1357 case SPINAND_GET_FEATURE: 1358 snandc->buf_count = 4; 1359 qcom_read_reg_dma(snandc, NAND_FLASH_FEATURES, 1, NAND_BAM_NEXT_SGL); 1360 copy_ftr = true; 1361 break; 1362 case SPINAND_SET_FEATURE: 1363 snandc->regs->flash_feature = cpu_to_le32(*(u32 *)op->data.buf.out); 1364 qcom_write_reg_dma(snandc, &snandc->regs->flash_feature, 1365 NAND_FLASH_FEATURES, 1, NAND_BAM_NEXT_SGL); 1366 break; 1367 case SPINAND_PROGRAM_EXECUTE: 1368 case SPINAND_WRITE_EN: 1369 case SPINAND_RESET: 
1370 case SPINAND_ERASE: 1371 case SPINAND_READ: 1372 return 0; 1373 default: 1374 return -EOPNOTSUPP; 1375 } 1376 1377 ret = qcom_submit_descs(snandc); 1378 if (ret) 1379 dev_err(snandc->dev, "failure in submitting descriptor for:%d\n", opcode); 1380 1381 if (copy) { 1382 qcom_nandc_dev_to_mem(snandc, true); 1383 memcpy(op->data.buf.in, snandc->reg_read_buf, snandc->buf_count); 1384 } 1385 1386 if (copy_ftr) { 1387 qcom_nandc_dev_to_mem(snandc, true); 1388 val = le32_to_cpu(*(__le32 *)snandc->reg_read_buf); 1389 val >>= 8; 1390 memcpy(op->data.buf.in, &val, snandc->buf_count); 1391 } 1392 1393 return ret; 1394 } 1395 1396 static bool qcom_spi_is_page_op(const struct spi_mem_op *op) 1397 { 1398 if (op->addr.buswidth != 1 && op->addr.buswidth != 2 && op->addr.buswidth != 4) 1399 return false; 1400 1401 if (op->data.dir == SPI_MEM_DATA_IN) { 1402 if (op->addr.buswidth == 4 && op->data.buswidth == 4) 1403 return true; 1404 1405 if (op->addr.nbytes == 2 && op->addr.buswidth == 1) 1406 return true; 1407 1408 } else if (op->data.dir == SPI_MEM_DATA_OUT) { 1409 if (op->data.buswidth == 4) 1410 return true; 1411 if (op->addr.nbytes == 2 && op->addr.buswidth == 1) 1412 return true; 1413 } 1414 1415 return false; 1416 } 1417 1418 static bool qcom_spi_supports_op(struct spi_mem *mem, const struct spi_mem_op *op) 1419 { 1420 if (!spi_mem_default_supports_op(mem, op)) 1421 return false; 1422 1423 if (op->cmd.nbytes != 1 || op->cmd.buswidth != 1) 1424 return false; 1425 1426 if (qcom_spi_is_page_op(op)) 1427 return true; 1428 1429 return ((!op->addr.nbytes || op->addr.buswidth == 1) && 1430 (!op->dummy.nbytes || op->dummy.buswidth == 1) && 1431 (!op->data.nbytes || op->data.buswidth == 1)); 1432 } 1433 1434 static int qcom_spi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) 1435 { 1436 struct qcom_nand_controller *snandc = spi_controller_get_devdata(mem->spi->controller); 1437 1438 dev_dbg(snandc->dev, "OP %02x ADDR %08llX@%d:%u DATA %d:%u", op->cmd.opcode, 1439 
op->addr.val, op->addr.buswidth, op->addr.nbytes, 1440 op->data.buswidth, op->data.nbytes); 1441 1442 if (qcom_spi_is_page_op(op)) { 1443 if (op->data.dir == SPI_MEM_DATA_IN) 1444 return qcom_spi_read_page(snandc, op); 1445 if (op->data.dir == SPI_MEM_DATA_OUT) 1446 return qcom_spi_write_page(snandc, op); 1447 } else { 1448 return qcom_spi_io_op(snandc, op); 1449 } 1450 1451 return 0; 1452 } 1453 1454 static const struct spi_controller_mem_ops qcom_spi_mem_ops = { 1455 .supports_op = qcom_spi_supports_op, 1456 .exec_op = qcom_spi_exec_op, 1457 }; 1458 1459 static const struct spi_controller_mem_caps qcom_spi_mem_caps = { 1460 .ecc = true, 1461 }; 1462 1463 static int qcom_spi_probe(struct platform_device *pdev) 1464 { 1465 struct device *dev = &pdev->dev; 1466 struct spi_controller *ctlr; 1467 struct qcom_nand_controller *snandc; 1468 struct qpic_spi_nand *qspi; 1469 struct qpic_ecc *ecc; 1470 struct resource *res; 1471 const void *dev_data; 1472 int ret; 1473 1474 ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL); 1475 if (!ecc) 1476 return -ENOMEM; 1477 1478 qspi = devm_kzalloc(dev, sizeof(*qspi), GFP_KERNEL); 1479 if (!qspi) 1480 return -ENOMEM; 1481 1482 ctlr = __devm_spi_alloc_controller(dev, sizeof(*snandc), false); 1483 if (!ctlr) 1484 return -ENOMEM; 1485 1486 platform_set_drvdata(pdev, ctlr); 1487 1488 snandc = spi_controller_get_devdata(ctlr); 1489 qspi->snandc = snandc; 1490 1491 snandc->dev = dev; 1492 snandc->qspi = qspi; 1493 snandc->qspi->ctlr = ctlr; 1494 snandc->qspi->ecc = ecc; 1495 1496 dev_data = of_device_get_match_data(dev); 1497 if (!dev_data) { 1498 dev_err(&pdev->dev, "failed to get device data\n"); 1499 return -ENODEV; 1500 } 1501 1502 snandc->props = dev_data; 1503 snandc->dev = &pdev->dev; 1504 1505 snandc->core_clk = devm_clk_get(dev, "core"); 1506 if (IS_ERR(snandc->core_clk)) 1507 return PTR_ERR(snandc->core_clk); 1508 1509 snandc->aon_clk = devm_clk_get(dev, "aon"); 1510 if (IS_ERR(snandc->aon_clk)) 1511 return 
PTR_ERR(snandc->aon_clk); 1512 1513 snandc->qspi->iomacro_clk = devm_clk_get(dev, "iom"); 1514 if (IS_ERR(snandc->qspi->iomacro_clk)) 1515 return PTR_ERR(snandc->qspi->iomacro_clk); 1516 1517 snandc->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); 1518 if (IS_ERR(snandc->base)) 1519 return PTR_ERR(snandc->base); 1520 1521 snandc->base_phys = res->start; 1522 snandc->base_dma = dma_map_resource(dev, res->start, resource_size(res), 1523 DMA_BIDIRECTIONAL, 0); 1524 if (dma_mapping_error(dev, snandc->base_dma)) 1525 return -ENXIO; 1526 1527 ret = clk_prepare_enable(snandc->core_clk); 1528 if (ret) 1529 goto err_dis_core_clk; 1530 1531 ret = clk_prepare_enable(snandc->aon_clk); 1532 if (ret) 1533 goto err_dis_aon_clk; 1534 1535 ret = clk_prepare_enable(snandc->qspi->iomacro_clk); 1536 if (ret) 1537 goto err_dis_iom_clk; 1538 1539 ret = qcom_nandc_alloc(snandc); 1540 if (ret) 1541 goto err_snand_alloc; 1542 1543 ret = qcom_spi_init(snandc); 1544 if (ret) 1545 goto err_spi_init; 1546 1547 /* setup ECC engine */ 1548 snandc->qspi->ecc_eng.dev = &pdev->dev; 1549 snandc->qspi->ecc_eng.integration = NAND_ECC_ENGINE_INTEGRATION_PIPELINED; 1550 snandc->qspi->ecc_eng.ops = &qcom_spi_ecc_engine_ops_pipelined; 1551 snandc->qspi->ecc_eng.priv = snandc; 1552 1553 ret = nand_ecc_register_on_host_hw_engine(&snandc->qspi->ecc_eng); 1554 if (ret) { 1555 dev_err(&pdev->dev, "failed to register ecc engine:%d\n", ret); 1556 goto err_spi_init; 1557 } 1558 1559 ctlr->num_chipselect = QPIC_QSPI_NUM_CS; 1560 ctlr->mem_ops = &qcom_spi_mem_ops; 1561 ctlr->mem_caps = &qcom_spi_mem_caps; 1562 ctlr->dev.of_node = pdev->dev.of_node; 1563 ctlr->mode_bits = SPI_TX_DUAL | SPI_RX_DUAL | 1564 SPI_TX_QUAD | SPI_RX_QUAD; 1565 1566 ret = spi_register_controller(ctlr); 1567 if (ret) { 1568 dev_err(&pdev->dev, "spi_register_controller failed.\n"); 1569 goto err_spi_init; 1570 } 1571 1572 return 0; 1573 1574 err_spi_init: 1575 qcom_nandc_unalloc(snandc); 1576 err_snand_alloc: 1577 
clk_disable_unprepare(snandc->qspi->iomacro_clk); 1578 err_dis_iom_clk: 1579 clk_disable_unprepare(snandc->aon_clk); 1580 err_dis_aon_clk: 1581 clk_disable_unprepare(snandc->core_clk); 1582 err_dis_core_clk: 1583 dma_unmap_resource(dev, res->start, resource_size(res), 1584 DMA_BIDIRECTIONAL, 0); 1585 return ret; 1586 } 1587 1588 static void qcom_spi_remove(struct platform_device *pdev) 1589 { 1590 struct spi_controller *ctlr = platform_get_drvdata(pdev); 1591 struct qcom_nand_controller *snandc = spi_controller_get_devdata(ctlr); 1592 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1593 1594 spi_unregister_controller(ctlr); 1595 1596 qcom_nandc_unalloc(snandc); 1597 1598 clk_disable_unprepare(snandc->aon_clk); 1599 clk_disable_unprepare(snandc->core_clk); 1600 clk_disable_unprepare(snandc->qspi->iomacro_clk); 1601 1602 dma_unmap_resource(&pdev->dev, snandc->base_dma, resource_size(res), 1603 DMA_BIDIRECTIONAL, 0); 1604 } 1605 1606 static const struct qcom_nandc_props ipq9574_snandc_props = { 1607 .dev_cmd_reg_start = 0x7000, 1608 .supports_bam = true, 1609 }; 1610 1611 static const struct of_device_id qcom_snandc_of_match[] = { 1612 { 1613 .compatible = "qcom,ipq9574-snand", 1614 .data = &ipq9574_snandc_props, 1615 }, 1616 {} 1617 }; 1618 MODULE_DEVICE_TABLE(of, qcom_snandc_of_match); 1619 1620 static struct platform_driver qcom_spi_driver = { 1621 .driver = { 1622 .name = "qcom_snand", 1623 .of_match_table = qcom_snandc_of_match, 1624 }, 1625 .probe = qcom_spi_probe, 1626 .remove = qcom_spi_remove, 1627 }; 1628 module_platform_driver(qcom_spi_driver); 1629 1630 MODULE_DESCRIPTION("SPI driver for QPIC QSPI cores"); 1631 MODULE_AUTHOR("Md Sadre Alam <quic_mdalam@quicinc.com>"); 1632 MODULE_LICENSE("GPL"); 1633 1634