Lines Matching +full:spi +full:- +full:nand

1 // SPDX-License-Identifier: (GPL-2.0-only OR MIT)
5 * Driver for the SPI mode of the Amlogic Flash Controller
13 #include <linux/clk-provider.h>
14 #include <linux/dma-mapping.h>
24 #include <linux/spi/spi-mem.h>
44 /* 4 bits support 4 chip selects (active low), but the SPI core supports only 2 */
144 /* Note: PCB and SPI-NAND chip limitations */
150 /* SPI-FLASH R/W operation cmd */
218 static struct aml_sfc *nand_to_aml_sfc(struct nand_device *nand) in nand_to_aml_sfc() argument
220 struct nand_ecc_engine *eng = nand->ecc.engine; in nand_to_aml_sfc()
227 return sfc->priv; in aml_sfc_to_ecc_ctx()
236 * The SPINAND flash controller employs a two-stage pipeline: in aml_sfc_wait_cmd_finish()
247 regmap_write(sfc->regmap_base, SFC_CMD, CMD_IDLE(sfc->cs_sel, 0)); in aml_sfc_wait_cmd_finish()
248 regmap_write(sfc->regmap_base, SFC_CMD, CMD_IDLE(sfc->cs_sel, 0)); in aml_sfc_wait_cmd_finish()
251 ret = regmap_read_poll_timeout(sfc->regmap_base, SFC_CMD, cmd_size, in aml_sfc_wait_cmd_finish()
255 dev_err(sfc->dev, "timed out waiting for CMD FIFO to empty\n"); in aml_sfc_wait_cmd_finish()
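
The two back-to-back CMD_IDLE writes above exist because of the two-stage pipeline described in the comment: one idle command drains each stage before the driver polls for an empty command FIFO. A minimal sketch of the pattern, assuming a hypothetical CMD_FIFO_CNT field macro (the real field name is not shown in this listing):

/* Sketch only: flush both pipeline stages, then poll the FIFO count. */
static int sfc_wait_fifo_empty_sketch(struct aml_sfc *sfc)
{
	u32 val;

	/* one idle command per pipeline stage */
	regmap_write(sfc->regmap_base, SFC_CMD, CMD_IDLE(sfc->cs_sel, 0));
	regmap_write(sfc->regmap_base, SFC_CMD, CMD_IDLE(sfc->cs_sel, 0));

	/* poll SFC_CMD until the queued-command count drops to zero */
	return regmap_read_poll_timeout(sfc->regmap_base, SFC_CMD, val,
					!FIELD_GET(CMD_FIFO_CNT, val),
					10, 1000);
}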
264 ret = regmap_write(sfc->regmap_base, SFC_CMD, CMD_IDLE(CS_NONE, idle_cycle)); in aml_sfc_pre_transfer()
268 return regmap_write(sfc->regmap_base, SFC_CMD, CMD_IDLE(sfc->cs_sel, cs2clk_cycle)); in aml_sfc_pre_transfer()
275 ret = regmap_write(sfc->regmap_base, SFC_CMD, CMD_IDLE(sfc->cs_sel, clk2cs_cycle)); in aml_sfc_end_transfer()
290 return regmap_update_bits(sfc->regmap_base, SFC_SPI_CFG, in aml_sfc_set_bus_width()
303 ret = aml_sfc_set_bus_width(sfc, op->cmd.buswidth, CMD_LANE); in aml_sfc_send_cmd()
307 for (i = 0; i < op->cmd.nbytes; i++) { in aml_sfc_send_cmd()
308 val = (op->cmd.opcode >> ((op->cmd.nbytes - i - 1) * 8)) & 0xff; in aml_sfc_send_cmd()
309 ret = regmap_write(sfc->regmap_base, SFC_CMD, CMD_COMMAND(sfc->cs_sel, val)); in aml_sfc_send_cmd()
322 ret = aml_sfc_set_bus_width(sfc, op->addr.buswidth, ADDR_LANE); in aml_sfc_send_addr()
326 for (i = 0; i < op->addr.nbytes; i++) { in aml_sfc_send_addr()
327 val = (op->addr.val >> ((op->addr.nbytes - i - 1) * 8)) & 0xff; in aml_sfc_send_addr()
329 ret = regmap_write(sfc->regmap_base, SFC_CMD, CMD_ADDR(sfc->cs_sel, val)); in aml_sfc_send_addr()
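
The shift in the loop above emits the address most-significant byte first: with op->addr.val = 0x0123 and nbytes = 2, the bytes pushed into the CMD FIFO are 0x01 and then 0x23. A standalone illustration of the same extraction:

#include <stdint.h>
#include <stdio.h>

/* Standalone demo of the MSB-first byte extraction used by
 * aml_sfc_send_addr(); prints 01 then 23 for a 2-byte address.
 */
int main(void)
{
	uint64_t addr = 0x0123;
	int nbytes = 2;

	for (int i = 0; i < nbytes; i++)
		printf("%02x\n",
		       (unsigned)((addr >> ((nbytes - i - 1) * 8)) & 0xff));
	return 0;
}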
339 switch (op->cmd.opcode) { in aml_sfc_is_xio_op()
364 if (op->dummy.nbytes) { in aml_sfc_send_cmd_addr_dummy()
367 dummy_cycle = op->dummy.nbytes * 8 / op->data.buswidth; in aml_sfc_send_cmd_addr_dummy()
369 dummy_cycle = op->dummy.nbytes * 8; in aml_sfc_send_cmd_addr_dummy()
370 cmd = CMD_DUMMY(sfc->cs_sel, dummy_cycle - 1); in aml_sfc_send_cmd_addr_dummy()
371 return regmap_write(sfc->regmap_base, SFC_CMD, cmd); in aml_sfc_send_cmd_addr_dummy()
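
For multi-I/O opcodes the dummy bytes are clocked across all data lanes, so the cycle count scales down with bus width: one dummy byte on a x4 bus costs 8 / 4 = 2 cycles, and the DUMMY command field is programmed with dummy_cycle - 1 (so 1 here). On a single lane the same byte takes the full 8 cycles. A standalone sketch of the branch above:

/* Illustrative only: dummy cycles as a function of op geometry. */
static unsigned int dummy_cycles(unsigned int dummy_nbytes,
				 unsigned int data_buswidth, bool xio_op)
{
	/* x2/x4 ops spread the dummy bits across the data lanes */
	if (xio_op)
		return dummy_nbytes * 8 / data_buswidth;
	return dummy_nbytes * 8;	/* classic single-lane clocking */
}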
379 switch (op->cmd.opcode) { in aml_sfc_is_snand_hwecc_page_op()
392 if (sfc->flags & SFC_HWECC) in aml_sfc_is_snand_hwecc_page_op()
410 sfc->daddr = dma_map_single(sfc->dev, databuf, datalen, dir); in aml_sfc_dma_buffer_setup()
411 ret = dma_mapping_error(sfc->dev, sfc->daddr); in aml_sfc_dma_buffer_setup()
413 dev_err(sfc->dev, "DMA mapping error\n"); in aml_sfc_dma_buffer_setup()
417 cmd = CMD_DATA_ADDRL(sfc->daddr); in aml_sfc_dma_buffer_setup()
418 ret = regmap_write(sfc->regmap_base, SFC_CMD, cmd); in aml_sfc_dma_buffer_setup()
422 cmd = CMD_DATA_ADDRH(sfc->daddr); in aml_sfc_dma_buffer_setup()
423 ret = regmap_write(sfc->regmap_base, SFC_CMD, cmd); in aml_sfc_dma_buffer_setup()
428 sfc->iaddr = dma_map_single(sfc->dev, infobuf, infolen, dir); in aml_sfc_dma_buffer_setup()
429 ret = dma_mapping_error(sfc->dev, sfc->iaddr); in aml_sfc_dma_buffer_setup()
431 dev_err(sfc->dev, "DMA mapping error\n"); in aml_sfc_dma_buffer_setup()
432 dma_unmap_single(sfc->dev, sfc->daddr, datalen, dir); in aml_sfc_dma_buffer_setup()
436 sfc->info_bytes = infolen; in aml_sfc_dma_buffer_setup()
437 cmd = CMD_INFO_ADDRL(sfc->iaddr); in aml_sfc_dma_buffer_setup()
438 ret = regmap_write(sfc->regmap_base, SFC_CMD, cmd); in aml_sfc_dma_buffer_setup()
442 cmd = CMD_INFO_ADDRH(sfc->iaddr); in aml_sfc_dma_buffer_setup()
443 ret = regmap_write(sfc->regmap_base, SFC_CMD, cmd); in aml_sfc_dma_buffer_setup()
451 dma_unmap_single(sfc->dev, sfc->iaddr, infolen, dir); in aml_sfc_dma_buffer_setup()
453 dma_unmap_single(sfc->dev, sfc->daddr, datalen, dir); in aml_sfc_dma_buffer_setup()
462 dma_unmap_single(sfc->dev, sfc->daddr, datalen, dir); in aml_sfc_dma_buffer_release()
464 dma_unmap_single(sfc->dev, sfc->iaddr, infolen, dir); in aml_sfc_dma_buffer_release()
465 sfc->info_bytes = 0; in aml_sfc_dma_buffer_release()
482 if (aml_sfc_dma_buffer_is_safe(op->data.buf.in)) in aml_get_dma_safe_input_buf()
483 return op->data.buf.in; in aml_get_dma_safe_input_buf()
485 return kzalloc(op->data.nbytes, GFP_KERNEL); in aml_get_dma_safe_input_buf()
490 if (WARN_ON(op->data.dir != SPI_MEM_DATA_IN) || WARN_ON(!buf)) in aml_sfc_put_dma_safe_input_buf()
493 if (buf == op->data.buf.in) in aml_sfc_put_dma_safe_input_buf()
496 memcpy(op->data.buf.in, buf, op->data.nbytes); in aml_sfc_put_dma_safe_input_buf()
502 if (aml_sfc_dma_buffer_is_safe(op->data.buf.out)) in aml_sfc_get_dma_safe_output_buf()
503 return (void *)op->data.buf.out; in aml_sfc_get_dma_safe_output_buf()
505 return kmemdup(op->data.buf.out, op->data.nbytes, GFP_KERNEL); in aml_sfc_get_dma_safe_output_buf()
510 if (WARN_ON(op->data.dir != SPI_MEM_DATA_OUT) || WARN_ON(!buf)) in aml_sfc_put_dma_safe_output_buf()
513 if (buf != op->data.buf.out) in aml_sfc_put_dma_safe_output_buf()
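
Together these four helpers form a bounce-buffer scheme: the caller's buffer is used directly when it is DMA-safe, otherwise a kernel copy is allocated (zeroed for reads, duplicated for writes) and read data is copied back on release. A hedged sketch of the read-side round trip, with signatures inferred from the fragments above and a hypothetical do_dma_read() standing in for the actual transfer:

/* Sketch of the read-side round trip; error handling trimmed. */
static int sfc_read_roundtrip_sketch(struct aml_sfc *sfc,
				     const struct spi_mem_op *op)
{
	void *buf = aml_get_dma_safe_input_buf(op);	/* bounce or direct */
	int ret;

	if (!buf)
		return -ENOMEM;

	ret = do_dma_read(sfc, buf, op->data.nbytes);	/* hypothetical */

	/* copies back into op->data.buf.in and frees only if bounced */
	aml_sfc_put_dma_safe_input_buf(op, buf);

	return ret;
}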
521 /* For each byte we wait for (8 cycles / buswidth) of the SPI clock. */ in aml_sfc_cal_timeout_cycle()
522 ms = 8 * MSEC_PER_SEC * op->data.nbytes / op->data.buswidth; in aml_sfc_cal_timeout_cycle()
523 do_div(ms, sfc->bus_rate / DEFAULT_BUS_CYCLE); in aml_sfc_cal_timeout_cycle()
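
Worked example of the timeout formula: a 2048-byte single-lane read with an 8 MHz SPI clock (32 MHz bus clock divided by DEFAULT_BUS_CYCLE = 4) takes 2048 * 8 = 16384 cycles, about 2 ms, which is exactly what the two lines above compute:

#include <stdint.h>
#include <stdio.h>

/* Standalone check of the transfer-timeout formula above. */
int main(void)
{
	uint64_t nbytes = 2048, buswidth = 1;
	uint64_t bus_rate = 32000000, bus_cycle = 4;	/* SPI clk = 8 MHz */

	uint64_t ms = 8 * 1000 * nbytes / buswidth;	/* cycle count in ms units */
	ms /= bus_rate / bus_cycle;			/* divide by SPI clock */

	printf("timeout = %llu ms\n", (unsigned long long)ms);	/* 2 ms */
	return 0;
}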
544 info = sfc->info_buf; in aml_sfc_check_ecc_pages_valid()
546 info += raw ? 0 : ecc_cfg->nsteps - 1; in aml_sfc_check_ecc_pages_valid()
552 dma_sync_single_for_cpu(sfc->dev, sfc->iaddr, sfc->info_bytes, in aml_sfc_check_ecc_pages_valid()
566 if (!op->data.nbytes) in aml_sfc_raw_io_op()
569 conf = (op->data.nbytes >> RAW_SIZE_BW) << __ffs(RAW_EXT_SIZE); in aml_sfc_raw_io_op()
570 ret = regmap_update_bits(sfc->regmap_base, SFC_SPI_CFG, RAW_EXT_SIZE, conf); in aml_sfc_raw_io_op()
574 if (op->data.dir == SPI_MEM_DATA_IN) { in aml_sfc_raw_io_op()
579 ret = -ENOMEM; in aml_sfc_raw_io_op()
583 cmd |= CMD_NAND2MEM(0, (op->data.nbytes & RAW_SIZE)); in aml_sfc_raw_io_op()
584 } else if (op->data.dir == SPI_MEM_DATA_OUT) { in aml_sfc_raw_io_op()
589 ret = -ENOMEM; in aml_sfc_raw_io_op()
593 cmd |= CMD_MEM2NAND(0, (op->data.nbytes & RAW_SIZE)); in aml_sfc_raw_io_op()
598 ret = aml_sfc_dma_buffer_setup(sfc, buf, op->data.nbytes, in aml_sfc_raw_io_op()
599 is_datain ? sfc->info_buf : NULL, in aml_sfc_raw_io_op()
605 ret = regmap_write(sfc->regmap_base, SFC_CMD, cmd); in aml_sfc_raw_io_op()
617 if (op->data.dir == SPI_MEM_DATA_IN) in aml_sfc_raw_io_op()
619 else if (op->data.dir == SPI_MEM_DATA_OUT) in aml_sfc_raw_io_op()
622 aml_sfc_dma_buffer_release(sfc, op->data.nbytes, in aml_sfc_raw_io_op()
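
Note how the raw transfer length is split across two registers: the low bits travel in the CMD word as (op->data.nbytes & RAW_SIZE), while the upper bits are placed in the RAW_EXT_SIZE field of SFC_SPI_CFG by the regmap_update_bits() near the top of the function (positioned with __ffs(RAW_EXT_SIZE)). A sketch of the split, with an assumed low-field width since the real RAW_SIZE_BW value is not in this listing:

/* Illustrative split of a raw length into CMD and CFG fields.
 * RAW_SIZE_BW (low-field width) is an assumption here, not the
 * driver's actual value.
 */
#define RAW_SIZE_BW	9
#define RAW_SIZE	((1U << RAW_SIZE_BW) - 1)

static void split_raw_len(unsigned int nbytes,
			  unsigned int *cmd_lo, unsigned int *cfg_hi)
{
	*cmd_lo = nbytes & RAW_SIZE;		/* goes into the CMD word */
	*cfg_hi = nbytes >> RAW_SIZE_BW;	/* goes into RAW_EXT_SIZE */
}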
643 for (i = 0, count = 0; i < ecc_cfg->nsteps; i++, count += step_size) { in aml_sfc_set_user_byte()
658 for (i = 0, count = 0; i < ecc_cfg->nsteps; i++, count += ECC_BCH8_INFO_BYTES) { in aml_sfc_get_user_byte()
673 sfc->ecc_stats.failed = 0; in aml_sfc_check_hwecc_status()
674 sfc->ecc_stats.bitflips = 0; in aml_sfc_check_hwecc_status()
675 sfc->ecc_stats.corrected = 0; in aml_sfc_check_hwecc_status()
677 for (i = 0, info = info_buf; i < ecc_cfg->nsteps; i++, info++) { in aml_sfc_check_hwecc_status()
681 sfc->ecc_stats.corrected += per_sector_bitflips; in aml_sfc_check_hwecc_status()
685 return -EBADMSG; in aml_sfc_check_hwecc_status()
700 page_size = ecc_cfg->stepsize * ecc_cfg->nsteps; in aml_sfc_read_page_hwecc()
701 data_len = page_size + ecc_cfg->oobsize; in aml_sfc_read_page_hwecc()
702 info_len = ecc_cfg->nsteps * ECC_PER_INFO_BYTE; in aml_sfc_read_page_hwecc()
704 ret = aml_sfc_dma_buffer_setup(sfc, sfc->data_buf, data_len, in aml_sfc_read_page_hwecc()
705 sfc->info_buf, info_len, DMA_FROM_DEVICE); in aml_sfc_read_page_hwecc()
709 cmd |= CMD_NAND2MEM(ecc_cfg->bch, ecc_cfg->nsteps); in aml_sfc_read_page_hwecc()
710 ret = regmap_write(sfc->regmap_base, SFC_CMD, cmd); in aml_sfc_read_page_hwecc()
723 ret = aml_sfc_check_hwecc_status(sfc, sfc->info_buf); in aml_sfc_read_page_hwecc()
725 sfc->ecc_stats.failed++; in aml_sfc_read_page_hwecc()
727 sfc->ecc_stats.bitflips = ret; in aml_sfc_read_page_hwecc()
729 if (sfc->flags & SFC_DATA_ONLY) { in aml_sfc_read_page_hwecc()
730 memcpy(op->data.buf.in, sfc->data_buf, page_size); in aml_sfc_read_page_hwecc()
731 } else if (sfc->flags & SFC_OOB_ONLY) { in aml_sfc_read_page_hwecc()
732 aml_sfc_get_user_byte(sfc, sfc->info_buf, op->data.buf.in); in aml_sfc_read_page_hwecc()
733 } else if (sfc->flags & SFC_DATA_OOB) { in aml_sfc_read_page_hwecc()
734 memcpy(op->data.buf.in, sfc->data_buf, page_size); in aml_sfc_read_page_hwecc()
735 aml_sfc_get_user_byte(sfc, sfc->info_buf, op->data.buf.in + page_size); in aml_sfc_read_page_hwecc()
753 page_size = ecc_cfg->stepsize * ecc_cfg->nsteps; in aml_sfc_write_page_hwecc()
754 data_len = page_size + ecc_cfg->oobsize; in aml_sfc_write_page_hwecc()
755 info_len = ecc_cfg->nsteps * ECC_PER_INFO_BYTE; in aml_sfc_write_page_hwecc()
757 memset(sfc->info_buf, ECC_PATTERN, ecc_cfg->oobsize); in aml_sfc_write_page_hwecc()
758 memcpy(sfc->data_buf, op->data.buf.out, page_size); in aml_sfc_write_page_hwecc()
760 if (!(sfc->flags & SFC_DATA_ONLY)) { in aml_sfc_write_page_hwecc()
761 if (sfc->flags & SFC_AUTO_OOB) in aml_sfc_write_page_hwecc()
762 aml_sfc_set_user_byte(sfc, sfc->info_buf, in aml_sfc_write_page_hwecc()
763 (u8 *)op->data.buf.out + page_size, 1); in aml_sfc_write_page_hwecc()
765 aml_sfc_set_user_byte(sfc, sfc->info_buf, in aml_sfc_write_page_hwecc()
766 (u8 *)op->data.buf.out + page_size, 0); in aml_sfc_write_page_hwecc()
769 ret = aml_sfc_dma_buffer_setup(sfc, sfc->data_buf, data_len, in aml_sfc_write_page_hwecc()
770 sfc->info_buf, info_len, DMA_TO_DEVICE); in aml_sfc_write_page_hwecc()
774 cmd |= CMD_MEM2NAND(ecc_cfg->bch, ecc_cfg->nsteps); in aml_sfc_write_page_hwecc()
775 ret = regmap_write(sfc->regmap_base, SFC_CMD, cmd); in aml_sfc_write_page_hwecc()
796 struct spi_device *spi; in aml_sfc_exec_op() local
800 sfc = spi_controller_get_devdata(mem->spi->controller); in aml_sfc_exec_op()
802 spi = mem->spi; in aml_sfc_exec_op()
803 sfc->cs_sel = spi->chip_select[0] ? CS_1 : CS_0; in aml_sfc_exec_op()
805 dev_dbg(sfc->dev, "cmd:0x%02x - addr:%08llX@%d:%u - dummy:%d:%u - data:%d:%u", in aml_sfc_exec_op()
806 op->cmd.opcode, op->addr.val, op->addr.buswidth, op->addr.nbytes, in aml_sfc_exec_op()
807 op->dummy.buswidth, op->dummy.nbytes, op->data.buswidth, op->data.nbytes); in aml_sfc_exec_op()
817 ret = aml_sfc_set_bus_width(sfc, op->data.buswidth, DATA_LANE); in aml_sfc_exec_op()
822 ecc_cfg && !(sfc->flags & SFC_RAW_RW)) { in aml_sfc_exec_op()
823 if (op->data.dir == SPI_MEM_DATA_IN) in aml_sfc_exec_op()
837 sfc = spi_controller_get_devdata(mem->spi->controller); in aml_sfc_adjust_op_size()
841 if (op->data.nbytes > ecc_cfg->stepsize * ECC_BCH_MAX_SECT_SIZE) in aml_sfc_adjust_op_size()
842 return -EOPNOTSUPP; in aml_sfc_adjust_op_size()
843 } else if (op->data.nbytes & ~RAW_MAX_RW_SIZE_MASK) { in aml_sfc_adjust_op_size()
844 return -EOPNOTSUPP; in aml_sfc_adjust_op_size()
858 struct nand_device *nand = mtd_to_nanddev(mtd); in aml_sfc_layout_ecc() local
860 if (section >= nand->ecc.ctx.nsteps) in aml_sfc_layout_ecc()
861 return -ERANGE; in aml_sfc_layout_ecc()
863 oobregion->offset = ECC_BCH8_USER_BYTES + (section * ECC_BCH8_INFO_BYTES); in aml_sfc_layout_ecc()
864 oobregion->length = ECC_BCH8_PARITY_BYTES; in aml_sfc_layout_ecc()
872 struct nand_device *nand = mtd_to_nanddev(mtd); in aml_sfc_ooblayout_free() local
874 if (section >= nand->ecc.ctx.nsteps) in aml_sfc_ooblayout_free()
875 return -ERANGE; in aml_sfc_ooblayout_free()
877 oobregion->offset = section * ECC_BCH8_INFO_BYTES; in aml_sfc_ooblayout_free()
878 oobregion->length = ECC_BCH8_USER_BYTES; in aml_sfc_ooblayout_free()
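
Each per-step info chunk is laid out as ECC_BCH8_USER_BYTES of free (user) space followed by ECC_BCH8_PARITY_BYTES of parity, which is what the two callbacks above describe to MTD. They would normally be exposed through an mtd_ooblayout_ops table; a sketch of the wiring (the table name here is illustrative, not necessarily the driver's):

/* Sketch: how the two callbacks above plug into MTD's OOB layout API. */
static const struct mtd_ooblayout_ops aml_sfc_ooblayout_ops_sketch = {
	.ecc  = aml_sfc_layout_ecc,	/* parity region per ECC step */
	.free = aml_sfc_ooblayout_free,	/* user bytes per ECC step */
};

/* registered once per MTD device, e.g. during ECC context init:
 * mtd_set_ooblayout(mtd, &aml_sfc_ooblayout_ops_sketch);
 */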
888 static int aml_spi_settings(struct aml_sfc *sfc, struct spi_device *spi) in aml_spi_settings() argument
892 if (spi->mode & SPI_CPHA) in aml_spi_settings()
895 if (spi->mode & SPI_CPOL) in aml_spi_settings()
898 conf |= FIELD_PREP(RXADJ, sfc->rx_adj); in aml_spi_settings()
900 return regmap_update_bits(sfc->regmap_base, SFC_SPI_CFG, in aml_spi_settings()
905 static int aml_set_spi_clk(struct aml_sfc *sfc, struct spi_device *spi) in aml_set_spi_clk() argument
910 if (spi->max_speed_hz > SFC_MAX_FREQUENCY) in aml_set_spi_clk()
912 else if (!spi->max_speed_hz) in aml_set_spi_clk()
914 else if (spi->max_speed_hz < SFC_MIN_FREQUENCY) in aml_set_spi_clk()
917 speed_hz = spi->max_speed_hz; in aml_set_spi_clk()
919 /* The SPI clock is generated by dividing the bus clock by four by default. */ in aml_set_spi_clk()
920 ret = regmap_write(sfc->regmap_base, SFC_CFG, (DEFAULT_BUS_CYCLE - 1)); in aml_set_spi_clk()
922 dev_err(sfc->dev, "failed to set bus cycle\n"); in aml_set_spi_clk()
926 return clk_set_rate(sfc->core_clk, speed_hz * DEFAULT_BUS_CYCLE); in aml_set_spi_clk()
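
Since the bus divider is fixed at DEFAULT_BUS_CYCLE (4), the core clock must run at four times the requested SPI rate. For instance, a device asking for 25 MHz needs a 100 MHz core clock:

/* Illustrative only: 25 MHz SPI clock * DEFAULT_BUS_CYCLE(4) = 100 MHz. */
static int sfc_set_25mhz_sketch(struct aml_sfc *sfc)
{
	return clk_set_rate(sfc->core_clk, 25000000 * 4);
}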
929 static int aml_sfc_setup(struct spi_device *spi) in aml_sfc_setup() argument
934 sfc = spi_controller_get_devdata(spi->controller); in aml_sfc_setup()
935 ret = aml_spi_settings(sfc, spi); in aml_sfc_setup()
939 ret = aml_set_spi_clk(sfc, spi); in aml_sfc_setup()
943 sfc->bus_rate = clk_get_rate(sfc->core_clk); in aml_sfc_setup()
948 static int aml_sfc_ecc_init_ctx(struct nand_device *nand) in aml_sfc_ecc_init_ctx() argument
950 struct mtd_info *mtd = nanddev_to_mtd(nand); in aml_sfc_ecc_init_ctx()
951 struct aml_sfc *sfc = nand_to_aml_sfc(nand); in aml_sfc_ecc_init_ctx()
953 const struct aml_sfc_caps *caps = sfc->caps; in aml_sfc_ecc_init_ctx()
954 struct aml_sfc_ecc_cfg *ecc_caps = caps->ecc_caps; in aml_sfc_ecc_init_ctx()
957 ecc_step_size = nand->ecc.user_conf.step_size; in aml_sfc_ecc_init_ctx()
958 ecc_strength = nand->ecc.user_conf.strength; in aml_sfc_ecc_init_ctx()
960 for (i = 0; i < caps->num_ecc_caps; i++) { in aml_sfc_ecc_init_ctx()
962 nand->ecc.ctx.conf.step_size = ecc_step_size; in aml_sfc_ecc_init_ctx()
963 nand->ecc.ctx.conf.flags |= BIT(ecc_caps[i].bch); in aml_sfc_ecc_init_ctx()
967 nand->ecc.ctx.conf.strength = ecc_strength; in aml_sfc_ecc_init_ctx()
970 if (!nand->ecc.ctx.conf.step_size) { in aml_sfc_ecc_init_ctx()
971 nand->ecc.ctx.conf.step_size = ECC_BCH8_DEFAULT_STEP; in aml_sfc_ecc_init_ctx()
972 nand->ecc.ctx.conf.flags |= BIT(ECC_DEFAULT_BCH_MODE); in aml_sfc_ecc_init_ctx()
975 if (!nand->ecc.ctx.conf.strength) in aml_sfc_ecc_init_ctx()
976 nand->ecc.ctx.conf.strength = ECC_BCH8_STRENGTH; in aml_sfc_ecc_init_ctx()
978 nand->ecc.ctx.nsteps = nand->memorg.pagesize / nand->ecc.ctx.conf.step_size; in aml_sfc_ecc_init_ctx()
979 nand->ecc.ctx.total = nand->ecc.ctx.nsteps * ECC_BCH8_PARITY_BYTES; in aml_sfc_ecc_init_ctx()
982 if ((nand->memorg.pagesize % nand->ecc.ctx.conf.step_size) || in aml_sfc_ecc_init_ctx()
983 (nand->memorg.oobsize < (nand->ecc.ctx.total + in aml_sfc_ecc_init_ctx()
984 nand->ecc.ctx.nsteps * ECC_BCH8_USER_BYTES))) in aml_sfc_ecc_init_ctx()
985 return -EOPNOTSUPP; in aml_sfc_ecc_init_ctx()
987 nand->ecc.ctx.conf.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST; in aml_sfc_ecc_init_ctx()
991 return -ENOMEM; in aml_sfc_ecc_init_ctx()
993 ecc_cfg->stepsize = nand->ecc.ctx.conf.step_size; in aml_sfc_ecc_init_ctx()
994 ecc_cfg->nsteps = nand->ecc.ctx.nsteps; in aml_sfc_ecc_init_ctx()
995 ecc_cfg->strength = nand->ecc.ctx.conf.strength; in aml_sfc_ecc_init_ctx()
996 ecc_cfg->oobsize = nand->memorg.oobsize; in aml_sfc_ecc_init_ctx()
997 ecc_cfg->bch = nand->ecc.ctx.conf.flags & BIT(ECC_DEFAULT_BCH_MODE) ? 1 : 2; in aml_sfc_ecc_init_ctx()
999 nand->ecc.ctx.priv = ecc_cfg; in aml_sfc_ecc_init_ctx()
1000 sfc->priv = (void *)ecc_cfg; in aml_sfc_ecc_init_ctx()
1003 sfc->flags |= SFC_HWECC; in aml_sfc_ecc_init_ctx()
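
Worked geometry for a common 2048+64 SPI-NAND page with the 512-byte default step: nsteps = 2048 / 512 = 4, and the OOB must hold nsteps * (parity + user) bytes, which is the -EOPNOTSUPP check above. The 14-byte parity / 2-byte user split per BCH8 step is an assumption typical of BCH8 over 512 bytes, not a value shown in this listing:

#include <stdio.h>

/* Worked geometry check for a 2048+64 page, 512 B step, BCH8.
 * parity/user byte counts are assumptions; substitute the real
 * ECC_BCH8_PARITY_BYTES and ECC_BCH8_USER_BYTES values.
 */
int main(void)
{
	unsigned pagesize = 2048, oobsize = 64, step = 512;
	unsigned parity = 14, user = 2;

	unsigned nsteps = pagesize / step;	/* 4 */
	unsigned total = nsteps * parity;	/* 56 parity bytes */

	printf("nsteps=%u total=%u oob_need=%u fits=%d\n",
	       nsteps, total, total + nsteps * user,
	       oobsize >= total + nsteps * user);	/* 64 >= 64 -> fits */
	return 0;
}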
1008 static void aml_sfc_ecc_cleanup_ctx(struct nand_device *nand) in aml_sfc_ecc_cleanup_ctx() argument
1010 struct aml_sfc *sfc = nand_to_aml_sfc(nand); in aml_sfc_ecc_cleanup_ctx()
1012 sfc->flags &= ~(SFC_HWECC); in aml_sfc_ecc_cleanup_ctx()
1013 kfree(nand->ecc.ctx.priv); in aml_sfc_ecc_cleanup_ctx()
1014 sfc->priv = NULL; in aml_sfc_ecc_cleanup_ctx()
1017 static int aml_sfc_ecc_prepare_io_req(struct nand_device *nand, in aml_sfc_ecc_prepare_io_req() argument
1020 struct aml_sfc *sfc = nand_to_aml_sfc(nand); in aml_sfc_ecc_prepare_io_req()
1021 struct spinand_device *spinand = nand_to_spinand(nand); in aml_sfc_ecc_prepare_io_req()
1023 sfc->flags &= ~SFC_XFER_MODE_MASK; in aml_sfc_ecc_prepare_io_req()
1025 if (req->datalen && !req->ooblen) in aml_sfc_ecc_prepare_io_req()
1026 sfc->flags |= SFC_DATA_ONLY; in aml_sfc_ecc_prepare_io_req()
1027 else if (!req->datalen && req->ooblen) in aml_sfc_ecc_prepare_io_req()
1028 sfc->flags |= SFC_OOB_ONLY; in aml_sfc_ecc_prepare_io_req()
1029 else if (req->datalen && req->ooblen) in aml_sfc_ecc_prepare_io_req()
1030 sfc->flags |= SFC_DATA_OOB; in aml_sfc_ecc_prepare_io_req()
1032 if (req->mode == MTD_OPS_RAW) in aml_sfc_ecc_prepare_io_req()
1033 sfc->flags |= SFC_RAW_RW; in aml_sfc_ecc_prepare_io_req()
1034 else if (req->mode == MTD_OPS_AUTO_OOB) in aml_sfc_ecc_prepare_io_req()
1035 sfc->flags |= SFC_AUTO_OOB; in aml_sfc_ecc_prepare_io_req()
1037 memset(spinand->oobbuf, 0xff, nanddev_per_page_oobsize(nand)); in aml_sfc_ecc_prepare_io_req()
1042 static int aml_sfc_ecc_finish_io_req(struct nand_device *nand, in aml_sfc_ecc_finish_io_req() argument
1045 struct aml_sfc *sfc = nand_to_aml_sfc(nand); in aml_sfc_ecc_finish_io_req()
1046 struct mtd_info *mtd = nanddev_to_mtd(nand); in aml_sfc_ecc_finish_io_req()
1048 if (req->mode == MTD_OPS_RAW || req->type == NAND_PAGE_WRITE) in aml_sfc_ecc_finish_io_req()
1051 if (sfc->ecc_stats.failed) in aml_sfc_ecc_finish_io_req()
1052 mtd->ecc_stats.failed++; in aml_sfc_ecc_finish_io_req()
1054 mtd->ecc_stats.corrected += sfc->ecc_stats.corrected; in aml_sfc_ecc_finish_io_req()
1056 return sfc->ecc_stats.failed ? -EBADMSG : sfc->ecc_stats.bitflips; in aml_sfc_ecc_finish_io_req()
1072 sfc->gate_clk = devm_clk_get_enabled(sfc->dev, "gate"); in aml_sfc_clk_init()
1073 if (IS_ERR(sfc->gate_clk)) { in aml_sfc_clk_init()
1074 dev_err(sfc->dev, "unable to enable gate clk\n"); in aml_sfc_clk_init()
1075 return PTR_ERR(sfc->gate_clk); in aml_sfc_clk_init()
1078 sfc->core_clk = devm_clk_get_enabled(sfc->dev, "core"); in aml_sfc_clk_init()
1079 if (IS_ERR(sfc->core_clk)) { in aml_sfc_clk_init()
1080 dev_err(sfc->dev, "unable to enable core clk\n"); in aml_sfc_clk_init()
1081 return PTR_ERR(sfc->core_clk); in aml_sfc_clk_init()
1084 return clk_set_rate(sfc->core_clk, SFC_BUS_DEFAULT_CLK); in aml_sfc_clk_init()
1089 clk_disable_unprepare(sfc->core_clk); in aml_sfc_disable_clk()
1090 clk_disable_unprepare(sfc->gate_clk); in aml_sfc_disable_clk()
1097 struct device_node *np = pdev->dev.of_node; in aml_sfc_probe()
1098 struct device *dev = &pdev->dev; in aml_sfc_probe()
1114 return -ENOMEM; in aml_sfc_probe()
1118 sfc->dev = dev; in aml_sfc_probe()
1119 sfc->ctrl = ctrl; in aml_sfc_probe()
1121 sfc->caps = of_device_get_match_data(dev); in aml_sfc_probe()
1122 if (!sfc->caps) in aml_sfc_probe()
1123 return dev_err_probe(dev, -ENODEV, "failed to get device data\n"); in aml_sfc_probe()
1129 sfc->regmap_base = devm_regmap_init_mmio(dev, reg_base, &core_config); in aml_sfc_probe()
1130 if (IS_ERR(sfc->regmap_base)) in aml_sfc_probe()
1131 return dev_err_probe(dev, PTR_ERR(sfc->regmap_base), in aml_sfc_probe()
1134 sfc->data_buf = devm_kzalloc(dev, SFC_BUF_SIZE, GFP_KERNEL); in aml_sfc_probe()
1135 if (!sfc->data_buf) in aml_sfc_probe()
1136 return -ENOMEM; in aml_sfc_probe()
1137 sfc->info_buf = (__le64 *)(sfc->data_buf + SFC_DATABUF_SIZE); in aml_sfc_probe()
1143 /* Enable the Amlogic flash controller SPI mode */ in aml_sfc_probe()
1144 ret = regmap_write(sfc->regmap_base, SFC_SPI_CFG, SPI_MODE_EN); in aml_sfc_probe()
1146 dev_err(dev, "failed to enable SPI mode\n"); in aml_sfc_probe()
1150 ret = dma_set_mask(sfc->dev, DMA_BIT_MASK(32)); in aml_sfc_probe()
1152 dev_err(sfc->dev, "failed to set dma mask\n"); in aml_sfc_probe()
1156 sfc->ecc_eng.dev = &pdev->dev; in aml_sfc_probe()
1157 sfc->ecc_eng.integration = NAND_ECC_ENGINE_INTEGRATION_PIPELINED; in aml_sfc_probe()
1158 sfc->ecc_eng.ops = &aml_sfc_ecc_engine_ops; in aml_sfc_probe()
1159 sfc->ecc_eng.priv = sfc; in aml_sfc_probe()
1161 ret = nand_ecc_register_on_host_hw_engine(&sfc->ecc_eng); in aml_sfc_probe()
1163 dev_err(&pdev->dev, "failed to register Amlogic host ECC engine\n"); in aml_sfc_probe()
1167 ret = of_property_read_u32(np, "amlogic,rx-adj", &val); in aml_sfc_probe()
1169 sfc->rx_adj = val; in aml_sfc_probe()
1171 ctrl->dev.of_node = np; in aml_sfc_probe()
1172 ctrl->mem_ops = &aml_sfc_mem_ops; in aml_sfc_probe()
1173 ctrl->mem_caps = &aml_sfc_mem_caps; in aml_sfc_probe()
1174 ctrl->setup = aml_sfc_setup; in aml_sfc_probe()
1175 ctrl->mode_bits = SPI_TX_QUAD | SPI_TX_DUAL | SPI_RX_QUAD | in aml_sfc_probe()
1177 ctrl->max_speed_hz = SFC_MAX_FREQUENCY; in aml_sfc_probe()
1178 ctrl->min_speed_hz = SFC_MIN_FREQUENCY; in aml_sfc_probe()
1179 ctrl->num_chipselect = SFC_MAX_CS_NUM; in aml_sfc_probe()
1203 .compatible = "amlogic,a4-spifc",
1220 MODULE_DESCRIPTION("Amlogic SPI Flash Controller driver");