// SPDX-License-Identifier: GPL-2.0
/*
 * SPI NAND driver for GigaDevice GD5FxGQ4xA chips.
 *
 * Author:
 *	Chuanhong Guo <gch981213@gmail.com>
 */

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/mtd/spinand.h>

/* JEDEC manufacturer ID for GigaDevice. */
#define SPINAND_MFR_GIGADEVICE		0xC8

/*
 * Vendor-specific ECC status codes reported by the chip in the status
 * register (extracted below with STATUS_ECC_MASK).
 */
#define GD5FXGQ4XA_STATUS_ECC_1_7_BITFLIPS	(1 << 4)
#define GD5FXGQ4XA_STATUS_ECC_8_BITFLIPS	(3 << 4)

/* Supported page-read-from-cache op variants, listed fastest first. */
static SPINAND_OP_VARIANTS(read_cache_variants,
		SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));

/* Supported program-load op variants, x4 preferred over single-bit. */
static SPINAND_OP_VARIANTS(write_cache_variants,
		SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
		SPINAND_PROG_LOAD(true, 0, NULL, 0));

/* Same as above but for cache-update (random data load) operations. */
static SPINAND_OP_VARIANTS(update_cache_variants,
		SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
		SPINAND_PROG_LOAD(false, 0, NULL, 0));

/*
 * ECC region of OOB section @section.
 *
 * The 64-byte OOB area (see NAND_MEMORG below) is treated as four
 * 16-byte sections; the upper 8 bytes of each section hold ECC data.
 * Returns -ERANGE when @section is out of range.
 */
static int gd5fxgq4xa_ooblayout_ecc(struct mtd_info *mtd, int section,
				    struct mtd_oob_region *region)
{
	if (section > 3)
		return -ERANGE;

	region->offset = (16 * section) + 8;
	region->length = 8;

	return 0;
}

/*
 * Free (user-available) region of OOB section @section: the lower
 * 8 bytes of each 16-byte section, except that byte 0 of section 0 is
 * reserved for the bad block marker.
 * Returns -ERANGE when @section is out of range.
 */
static int gd5fxgq4xa_ooblayout_free(struct mtd_info *mtd, int section,
				     struct mtd_oob_region *region)
{
	if (section > 3)
		return -ERANGE;

	if (section) {
		region->offset = 16 * section;
		region->length = 8;
	} else {
		/* section 0 has one byte reserved for bad block mark */
		region->offset = 1;
		region->length = 7;
	}
	return 0;
}

/*
 * Translate the vendor-specific ECC field of @status into a bitflip
 * count, -EBADMSG for an uncorrectable error, or -EINVAL for an
 * unknown code.
 */
static int gd5fxgq4xa_ecc_get_status(struct spinand_device *spinand,
				     u8 status)
{
	switch (status & STATUS_ECC_MASK) {
	case STATUS_ECC_NO_BITFLIPS:
		return 0;

	case GD5FXGQ4XA_STATUS_ECC_1_7_BITFLIPS:
		/*
		 * The chip only reports that 1-7 bits were flipped,
		 * not the exact count, so return the maximum.
		 */
		return 7;

	case GD5FXGQ4XA_STATUS_ECC_8_BITFLIPS:
		return 8;

	case STATUS_ECC_UNCOR_ERROR:
		return -EBADMSG;

	default:
		break;
	}

	return -EINVAL;
}

static const struct mtd_ooblayout_ops gd5fxgq4xa_ooblayout = {
	.ecc = gd5fxgq4xa_ooblayout_ecc,
	.free = gd5fxgq4xa_ooblayout_free,
};

/*
 * Supported chips, keyed by the device ID byte. The three parts share
 * the same page/OOB geometry and op variants; only one NAND_MEMORG
 * field differs (presumably total capacity, matching the 1G/2G/4G
 * naming — confirm against the NAND_MEMORG definition).
 */
static const struct spinand_info gigadevice_spinand_table[] = {
	SPINAND_INFO("GD5F1GQ4xA", 0xF1,
		     NAND_MEMORG(1, 2048, 64, 64, 1024, 1, 1, 1),
		     NAND_ECCREQ(8, 512),
		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
					      &write_cache_variants,
					      &update_cache_variants),
		     0,
		     SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout,
				     gd5fxgq4xa_ecc_get_status)),
	SPINAND_INFO("GD5F2GQ4xA", 0xF2,
		     NAND_MEMORG(1, 2048, 64, 64, 2048, 1, 1, 1),
		     NAND_ECCREQ(8, 512),
		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
					      &write_cache_variants,
					      &update_cache_variants),
		     0,
		     SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout,
				     gd5fxgq4xa_ecc_get_status)),
	SPINAND_INFO("GD5F4GQ4xA", 0xF4,
		     NAND_MEMORG(1, 2048, 64, 64, 4096, 1, 1, 1),
		     NAND_ECCREQ(8, 512),
		     SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
					      &write_cache_variants,
					      &update_cache_variants),
		     0,
		     SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout,
				     gd5fxgq4xa_ecc_get_status)),
};

/*
 * Match the raw ID bytes in @spinand->id against the table above.
 *
 * Returns 1 when a supported GigaDevice chip was matched and
 * initialized, 0 when the manufacturer byte is not GigaDevice's, or a
 * negative error code from spinand_match_and_init().
 */
static int gigadevice_spinand_detect(struct spinand_device *spinand)
{
	u8 *id = spinand->id.data;
	int ret;

	/*
	 * For GD NANDs, there is an address byte needed to shift in before
	 * IDs are read out, so the first byte in raw_id is dummy: the
	 * manufacturer ID is at id[1] and the device ID at id[2].
	 */
	if (id[1] != SPINAND_MFR_GIGADEVICE)
		return 0;

	ret = spinand_match_and_init(spinand, gigadevice_spinand_table,
				     ARRAY_SIZE(gigadevice_spinand_table),
				     id[2]);
	if (ret)
		return ret;

	return 1;
}

static const struct spinand_manufacturer_ops gigadevice_spinand_manuf_ops = {
	.detect = gigadevice_spinand_detect,
};

/* Manufacturer entry registered with the SPI NAND core. */
const struct spinand_manufacturer gigadevice_spinand_manufacturer = {
	.id = SPINAND_MFR_GIGADEVICE,
	.name = "GigaDevice",
	.ops = &gigadevice_spinand_manuf_ops,
};