/*
 * Common Flash Interface support:
 *   Generic utility functions not dependent on command set
 *
 * Copyright (C) 2002 Red Hat
 * Copyright (C) 2003 STMicroelectronics Limited
 *
 * This code is covered by the GPL.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/compatmac.h>

/*
 * cfi_qry_present - check whether the chip at @base is answering CFI
 * Query-mode reads.
 *
 * Reads the three bytes at query offsets 0x10..0x12 (scaled by
 * interleave * device width) and compares them with the expected
 * "QRY" identification string.  Purely a read-side probe: it does not
 * itself switch the chip into or out of query mode.
 *
 * Returns 1 if the "QRY" signature is present, 0 otherwise.
 */
int __xipram cfi_qry_present(struct map_info *map, __u32 base,
			     struct cfi_private *cfi)
{
	int osf = cfi->interleave * cfi->device_type;	/* scale factor */
	map_word val[3];
	map_word qry[3];

	/* Build the interleave/width-adjusted patterns to compare against */
	qry[0] = cfi_build_cmd('Q', map, cfi);
	qry[1] = cfi_build_cmd('R', map, cfi);
	qry[2] = cfi_build_cmd('Y', map, cfi);

	val[0] = map_read(map, base + osf*0x10);
	val[1] = map_read(map, base + osf*0x11);
	val[2] = map_read(map, base + osf*0x12);

	if (!map_word_equal(map, qry[0], val[0]))
		return 0;

	if (!map_word_equal(map, qry[1], val[1]))
		return 0;

	if (!map_word_equal(map, qry[2], val[2]))
		return 0;

	return 1;		/* "QRY" found */
}
EXPORT_SYMBOL_GPL(cfi_qry_present);

/*
 * cfi_qry_mode_on - try to switch the chip at @base into CFI Query mode.
 *
 * Tries the standard CFI entry sequence first (0x98 at address 0x55,
 * preceded by a 0xF0 reset), then falls back to several quirky
 * sequences known to be needed by specific chip families (see the
 * per-attempt comments below).  After each attempt the result is
 * verified with cfi_qry_present().
 *
 * Returns 1 once the "QRY" signature is readable, 0 if every known
 * sequence failed.  On success the chip is left in query mode; the
 * caller must restore read mode via cfi_qry_mode_off().
 */
int __xipram cfi_qry_mode_on(uint32_t base, struct map_info *map,
			     struct cfi_private *cfi)
{
	/* Standard sequence: reset (0xF0), then CFI Query (0x98) at 0x55 */
	cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
	if (cfi_qry_present(map, base, cfi))
		return 1;
	/* QRY not found probably we deal with some odd CFI chips */
	/* Some revisions of some old Intel chips? */
	cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);
	if (cfi_qry_present(map, base, cfi))
		return 1;
	/* ST M29DW chips */
	cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x98, 0x555, base, map, cfi, cfi->device_type, NULL);
	if (cfi_qry_present(map, base, cfi))
		return 1;
	/* some old SST chips, e.g. 39VF160x/39VF320x */
	cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, 0x5555, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, 0x2AAA, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x98, 0x5555, base, map, cfi, cfi->device_type, NULL);
	if (cfi_qry_present(map, base, cfi))
		return 1;
	/* QRY not found */
	return 0;
}
EXPORT_SYMBOL_GPL(cfi_qry_mode_on);

/*
 * cfi_qry_mode_off - return the chip at @base from query mode to
 * normal read (array) mode.
 *
 * Issues both reset opcodes (0xF0 and 0xFF) so that chips of either
 * command-set flavour leave query mode, plus a chip-specific extra
 * reset for the ST M29W128G family.
 */
void __xipram cfi_qry_mode_off(uint32_t base, struct map_info *map,
			       struct cfi_private *cfi)
{
	cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL);
	/* M29W128G flashes require an additional reset command
	   when exit qry mode */
	if ((cfi->mfr == CFI_MFR_ST) && (cfi->id == 0x227E || cfi->id == 0x7E))
		cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
}
EXPORT_SYMBOL_GPL(cfi_qry_mode_off);

/*
 * cfi_read_pri - read a vendor's Primary Extended Query Table.
 * @map:  the flash mapping
 * @adr:  query-space offset of the table (0 means "no table")
 * @size: number of bytes to read, including the cfi_extquery header
 * @name: human-readable table name, used only for the log message
 *
 * Allocates @size bytes, switches the chip into query mode, copies the
 * table out one byte at a time, and restores read mode.  Under
 * CONFIG_MTD_XIP interrupts are disabled for the whole operation,
 * since the flash cannot serve instruction fetches while in query
 * mode.
 *
 * Returns the kmalloc'd table (caller must kfree it), or NULL if
 * @adr is 0 or allocation failed.
 */
struct cfi_extquery *
__xipram cfi_read_pri(struct map_info *map, __u16 adr, __u16 size, const char* name)
{
	struct cfi_private *cfi = map->fldrv_priv;
	__u32 base = 0; // cfi->chips[0].start;
	int ofs_factor = cfi->interleave * cfi->device_type;
	int i;
	struct cfi_extquery *extp = NULL;

	if (!adr)
		goto out;

	printk(KERN_INFO "%s Extended Query Table at 0x%4.4X\n", name, adr);

	extp = kmalloc(size, GFP_KERNEL);
	if (!extp) {
		printk(KERN_ERR "Failed to allocate memory\n");
		goto out;
	}

#ifdef CONFIG_MTD_XIP
	local_irq_disable();
#endif

	/* Switch it into Query Mode */
	cfi_qry_mode_on(base, map, cfi);
	/* Read in the Extended Query Table */
	for (i=0; i<size; i++) {
		((unsigned char *)extp)[i] =
			cfi_read_query(map, base+((adr+i)*ofs_factor));
	}

	/* Make sure it returns to read mode */
	cfi_qry_mode_off(base, map, cfi);

#ifdef CONFIG_MTD_XIP
	/* Refill the instruction prefetch queue from the (now readable)
	   array before re-enabling interrupts */
	(void) map_read(map, base);
	xip_iprefetch();
	local_irq_enable();
#endif

 out:	return extp;
}

EXPORT_SYMBOL(cfi_read_pri);

/*
 * cfi_fixup - apply chip-specific fixups to an MTD device.
 * @mtd:    the device to fix up
 * @fixups: table of fixups, terminated by an entry with a NULL
 *          ->fixup callback
 *
 * Walks the table and invokes every fixup whose manufacturer and
 * device id match the probed chip; CFI_MFR_ANY / CFI_ID_ANY act as
 * wildcards.
 */
void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup *fixups)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_fixup *f;

	for (f=fixups; f->fixup; f++) {
		if (((f->mfr == CFI_MFR_ANY) || (f->mfr == cfi->mfr)) &&
		    ((f->id  == CFI_ID_ANY)  || (f->id  == cfi->id))) {
			f->fixup(mtd, f->param);
		}
	}
}

EXPORT_SYMBOL(cfi_fixup);

/*
 * cfi_varsize_frob - apply @frob to every erase block in [ofs, ofs+len)
 * on a chip with variable-size erase regions.
 * @mtd:   the device
 * @frob:  per-block callback (e.g. an erase or lock operation)
 * @ofs:   start offset; must be aligned to the erase size in effect there
 * @len:   length; @ofs + @len must likewise be region-aligned
 * @thunk: opaque cookie passed through to @frob
 *
 * Validates that both ends of the range fall on erase-block boundaries
 * of their respective erase regions, then walks the range block by
 * block, tracking region crossings and chip crossings (for interleaved
 * multi-chip maps).
 *
 * Returns 0 on success, -EINVAL for an out-of-range or misaligned
 * request, or the first non-zero value returned by @frob.
 */
int cfi_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob,
		     loff_t ofs, size_t len, void *thunk)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr;
	int chipnum, ret = 0;
	int i, first;
	struct mtd_erase_region_info *regions = mtd->eraseregions;

	if (ofs > mtd->size)
		return -EINVAL;

	if ((len + ofs) > mtd->size)
		return -EINVAL;

	/* Check that both start and end of the requested erase are
	 * aligned with the erasesize at the appropriate addresses.
	 */

	i = 0;

	/* Skip all erase regions which are ended before the start of
	   the requested erase. Actually, to save on the calculations,
	   we skip to the first erase region which starts after the
	   start of the requested erase, and then go back one.
	*/

	while (i < mtd->numeraseregions && ofs >= regions[i].offset)
		i++;
	i--;

	/* OK, now i is pointing at the erase region in which this
	   erase request starts. Check the start of the requested
	   erase range is aligned with the erase size which is in
	   effect here.
	*/

	if (ofs & (regions[i].erasesize-1))
		return -EINVAL;

	/* Remember the erase region we start on */
	first = i;

	/* Next, check that the end of the requested erase is aligned
	 * with the erase region at that address.
	 */

	while (i<mtd->numeraseregions && (ofs + len) >= regions[i].offset)
		i++;

	/* As before, drop back one to point at the region in which
	   the address actually falls
	*/
	i--;

	if ((ofs + len) & (regions[i].erasesize-1))
		return -EINVAL;

	chipnum = ofs >> cfi->chipshift;
	adr = ofs - (chipnum << cfi->chipshift);

	i=first;

	while(len) {
		int size = regions[i].erasesize;

		ret = (*frob)(map, &cfi->chips[chipnum], adr, size, thunk);

		if (ret)
			return ret;

		adr += size;
		ofs += size;
		len -= size;

		/* Crossed into the next erase region? Pick up its block size */
		if (ofs == regions[i].offset + size * regions[i].numblocks)
			i++;

		/* Crossed a chip boundary: restart addressing at the next chip */
		if (adr >> cfi->chipshift) {
			adr = 0;
			chipnum++;

			if (chipnum >= cfi->numchips)
				break;
		}
	}

	return 0;
}

EXPORT_SYMBOL(cfi_varsize_frob);

MODULE_LICENSE("GPL");