/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org> et al.
 */

/* Overhauled routines for dealing with different mmap regions of flash */

#ifndef __LINUX_MTD_MAP_H__
#define __LINUX_MTD_MAP_H__

#include <linux/bug.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/unaligned.h>

struct device_node;
struct module;

#ifdef CONFIG_MTD_MAP_BANK_WIDTH_1
#define map_bankwidth(map) 1
#define map_bankwidth_is_1(map) (map_bankwidth(map) == 1)
#define map_bankwidth_is_large(map) (0)
#define map_words(map) (1)
#define MAX_MAP_BANKWIDTH 1
#else
#define map_bankwidth_is_1(map) (0)
#endif

#ifdef CONFIG_MTD_MAP_BANK_WIDTH_2
# ifdef map_bankwidth
#  undef map_bankwidth
#  define map_bankwidth(map) ((map)->bankwidth)
# else
#  define map_bankwidth(map) 2
#  define map_bankwidth_is_large(map) (0)
#  define map_words(map) (1)
# endif
#define map_bankwidth_is_2(map) (map_bankwidth(map) == 2)
#undef MAX_MAP_BANKWIDTH
#define MAX_MAP_BANKWIDTH 2
#else
#define map_bankwidth_is_2(map) (0)
#endif

#ifdef CONFIG_MTD_MAP_BANK_WIDTH_4
# ifdef map_bankwidth
#  undef map_bankwidth
#  define map_bankwidth(map) ((map)->bankwidth)
# else
#  define map_bankwidth(map) 4
#  define map_bankwidth_is_large(map) (0)
#  define map_words(map) (1)
# endif
#define map_bankwidth_is_4(map) (map_bankwidth(map) == 4)
#undef MAX_MAP_BANKWIDTH
#define MAX_MAP_BANKWIDTH 4
#else
#define map_bankwidth_is_4(map) (0)
#endif

/* ensure we never evaluate anything shorter than an unsigned long
 * to zero, and ensure we'll never miss the end of a comparison (bjd) */

#define map_calc_words(map) ((map_bankwidth(map) + (sizeof(unsigned long)-1)) / sizeof(unsigned long))

#ifdef CONFIG_MTD_MAP_BANK_WIDTH_8
# ifdef map_bankwidth
#  undef map_bankwidth
#  define map_bankwidth(map) ((map)->bankwidth)
#  if BITS_PER_LONG < 64
#   undef map_bankwidth_is_large
#   define map_bankwidth_is_large(map) (map_bankwidth(map) > BITS_PER_LONG/8)
#   undef map_words
#   define map_words(map) map_calc_words(map)
#  endif
# else
#  define map_bankwidth(map) 8
#  define map_bankwidth_is_large(map) (BITS_PER_LONG < 64)
#  define map_words(map) map_calc_words(map)
# endif
#define map_bankwidth_is_8(map) (map_bankwidth(map) == 8)
#undef MAX_MAP_BANKWIDTH
#define MAX_MAP_BANKWIDTH 8
#else
#define map_bankwidth_is_8(map) (0)
#endif

#ifdef CONFIG_MTD_MAP_BANK_WIDTH_16
# ifdef map_bankwidth
#  undef map_bankwidth
#  define map_bankwidth(map) ((map)->bankwidth)
#  undef map_bankwidth_is_large
#  define map_bankwidth_is_large(map) (map_bankwidth(map) > BITS_PER_LONG/8)
#  undef map_words
#  define map_words(map) map_calc_words(map)
# else
#  define map_bankwidth(map) 16
#  define map_bankwidth_is_large(map) (1)
#  define map_words(map) map_calc_words(map)
# endif
#define map_bankwidth_is_16(map) (map_bankwidth(map) == 16)
#undef MAX_MAP_BANKWIDTH
#define MAX_MAP_BANKWIDTH 16
#else
#define map_bankwidth_is_16(map) (0)
#endif

#ifdef CONFIG_MTD_MAP_BANK_WIDTH_32
/* always use indirect access for 256-bit to preserve kernel stack */
# undef map_bankwidth
# define map_bankwidth(map) ((map)->bankwidth)
# undef map_bankwidth_is_large
# define map_bankwidth_is_large(map) (map_bankwidth(map) > BITS_PER_LONG/8)
# undef map_words
# define map_words(map) map_calc_words(map)
#define map_bankwidth_is_32(map) (map_bankwidth(map) == 32)
#undef MAX_MAP_BANKWIDTH
#define MAX_MAP_BANKWIDTH 32
#else
#define map_bankwidth_is_32(map) (0)
#endif

#ifndef map_bankwidth
#ifdef CONFIG_MTD
#warning "No CONFIG_MTD_MAP_BANK_WIDTH_xx selected. No NOR chip support can work"
#endif
static inline int map_bankwidth(void *map)
{
        BUG();
        return 0;
}
#define map_bankwidth_is_large(map) (0)
#define map_words(map) (0)
#define MAX_MAP_BANKWIDTH 1
#endif
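
/*
 * Illustrative sketch, not part of the upstream header: when more than one
 * CONFIG_MTD_MAP_BANK_WIDTH_xx option is enabled, map_bankwidth() falls back
 * to the run-time ->bankwidth field and map_words() rounds that byte count up
 * to whole unsigned longs via map_calc_words() above. The hypothetical helper
 * below (guarded by MTD_MAP_DOC_EXAMPLES, which is never defined anywhere)
 * only restates that rounding so the arithmetic is easy to follow.
 */
#ifdef MTD_MAP_DOC_EXAMPLES
static inline int mtd_map_example_words(int bankwidth_in_bytes)
{
        /* e.g. a 16-byte bankwidth on a 32-bit kernel: (16 + 3) / 4 = 4 longs */
        return (bankwidth_in_bytes + sizeof(unsigned long) - 1) /
               sizeof(unsigned long);
}
#endif /* MTD_MAP_DOC_EXAMPLES */
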
static inline int map_bankwidth_supported(int w)
{
        switch (w) {
#ifdef CONFIG_MTD_MAP_BANK_WIDTH_1
        case 1:
#endif
#ifdef CONFIG_MTD_MAP_BANK_WIDTH_2
        case 2:
#endif
#ifdef CONFIG_MTD_MAP_BANK_WIDTH_4
        case 4:
#endif
#ifdef CONFIG_MTD_MAP_BANK_WIDTH_8
        case 8:
#endif
#ifdef CONFIG_MTD_MAP_BANK_WIDTH_16
        case 16:
#endif
#ifdef CONFIG_MTD_MAP_BANK_WIDTH_32
        case 32:
#endif
                return 1;

        default:
                return 0;
        }
}

#define MAX_MAP_LONGS (((MAX_MAP_BANKWIDTH * 8) + BITS_PER_LONG - 1) / BITS_PER_LONG)

typedef union {
        unsigned long x[MAX_MAP_LONGS];
} map_word;

/* The map stuff is very simple. You fill in your struct map_info with
   a handful of routines for accessing the device, making sure they handle
   paging etc. correctly if your device needs it. Then you pass it off
   to a chip probe routine -- either JEDEC or CFI probe or both -- via
   do_map_probe(). If a chip is recognised, the probe code will invoke the
   appropriate chip driver (if present) and return a struct mtd_info.
   At that point, you fill in mtd->owner with your own module address
   and register it with the MTD core code. Or you could partition it
   and register the partitions instead, or keep it for your own private
   use; whatever.

   The mtd->priv field will point to the struct map_info, and any further
   private data required by the chip driver is linked from the
   mtd->priv->fldrv_priv field. This allows the map driver to get at
   the destructor function map->fldrv->destroy() when it's tired
   of living.
*/

struct mtd_chip_driver;

struct map_info {
        const char *name;
        unsigned long size;
        resource_size_t phys;
#define NO_XIP (-1UL)

        void __iomem *virt;
        void *cached;

        int swap; /* this mapping's byte-swapping requirement */
        int bankwidth; /* in octets. This isn't necessarily the width
                          of actual bus cycles -- it's the repeat interval
                          in bytes, before you are talking to the first chip again.
                        */

#ifdef CONFIG_MTD_COMPLEX_MAPPINGS
        map_word (*read)(struct map_info *, unsigned long);
        void (*copy_from)(struct map_info *, void *, unsigned long, ssize_t);

        void (*write)(struct map_info *, const map_word, unsigned long);
        void (*copy_to)(struct map_info *, unsigned long, const void *, ssize_t);

        /* We can perhaps put in 'point' and 'unpoint' methods, if we really
           want to enable XIP for non-linear mappings. Not yet though. */
#endif
        /* It's possible for the map driver to use cached memory in its
           copy_from implementation (and _only_ with copy_from). However,
           when the chip driver knows some flash area has changed contents,
           it will signal it to the map driver through this routine to let
           the map driver invalidate the corresponding cache as needed.
           If there is no cache to care about, this can be set to NULL. */
        void (*inval_cache)(struct map_info *, unsigned long, ssize_t);

        /* This will be called with 1 as parameter when the first map user
         * needs VPP, and called with 0 when the last user exits. The map
         * core maintains a reference counter, and assumes that VPP is a
         * global resource applying to all mapped flash chips on the system.
         */
        void (*set_vpp)(struct map_info *, int);

        unsigned long pfow_base;
        unsigned long map_priv_1;
        unsigned long map_priv_2;
        struct device_node *device_node;
        void *fldrv_priv;
        struct mtd_chip_driver *fldrv;
};

struct mtd_chip_driver {
        struct mtd_info *(*probe)(struct map_info *map);
        void (*destroy)(struct mtd_info *);
        struct module *module;
        char *name;
        struct list_head list;
};

void register_mtd_chip_driver(struct mtd_chip_driver *);
void unregister_mtd_chip_driver(struct mtd_chip_driver *);

struct mtd_info *do_map_probe(const char *name, struct map_info *map);
void map_destroy(struct mtd_info *mtd);

#define ENABLE_VPP(map) do { if (map->set_vpp) map->set_vpp(map, 1); } while (0)
#define DISABLE_VPP(map) do { if (map->set_vpp) map->set_vpp(map, 0); } while (0)

#define INVALIDATE_CACHED_RANGE(map, from, size) \
        do { if (map->inval_cache) map->inval_cache(map, from, size); } while (0)
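
/*
 * Minimal usage sketch, illustrative only: a trivial NOR map driver fills in
 * a struct map_info, lets simple_map_init() (defined further down in this
 * header) hook up the default accessors, and hands the map to do_map_probe().
 * Everything here is an assumption made for the example -- the guard macro
 * MTD_MAP_DOC_EXAMPLES is never defined, the physical window, size and
 * bankwidth are invented, and mtd_device_register()/mtd_device_unregister()
 * come from <linux/mtd/mtd.h>, not from this file.
 */
#ifdef MTD_MAP_DOC_EXAMPLES
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>

static struct map_info example_map = {
        .name      = "example-nor",
        .phys      = 0x20000000,        /* hypothetical flash window */
        .size      = 0x01000000,        /* 16 MiB */
        .bankwidth = 2,                 /* a single 16-bit chip */
};

static struct mtd_info *example_mtd;

static int example_map_attach(void)
{
        example_map.virt = ioremap(example_map.phys, example_map.size);
        if (!example_map.virt)
                return -ENOMEM;

        simple_map_init(&example_map);

        /* Try a CFI probe; "jedec_probe" would be the usual fallback. */
        example_mtd = do_map_probe("cfi_probe", &example_map);
        if (!example_mtd) {
                iounmap(example_map.virt);
                return -ENODEV;
        }

        example_mtd->owner = THIS_MODULE;
        return mtd_device_register(example_mtd, NULL, 0);
}

static void example_map_detach(void)
{
        mtd_device_unregister(example_mtd);
        map_destroy(example_mtd);
        iounmap(example_map.virt);
}
#endif /* MTD_MAP_DOC_EXAMPLES */
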
#define map_word_equal(map, val1, val2) \
({ \
        int i, ret = 1; \
        for (i = 0; i < map_words(map); i++) \
                if ((val1).x[i] != (val2).x[i]) { \
                        ret = 0; \
                        break; \
                } \
        ret; \
})

#define map_word_and(map, val1, val2) \
({ \
        map_word r; \
        int i; \
        for (i = 0; i < map_words(map); i++) \
                r.x[i] = (val1).x[i] & (val2).x[i]; \
        r; \
})

#define map_word_clr(map, val1, val2) \
({ \
        map_word r; \
        int i; \
        for (i = 0; i < map_words(map); i++) \
                r.x[i] = (val1).x[i] & ~(val2).x[i]; \
        r; \
})

#define map_word_or(map, val1, val2) \
({ \
        map_word r; \
        int i; \
        for (i = 0; i < map_words(map); i++) \
                r.x[i] = (val1).x[i] | (val2).x[i]; \
        r; \
})

#define map_word_andequal(map, val1, val2, val3) \
({ \
        int i, ret = 1; \
        for (i = 0; i < map_words(map); i++) { \
                if (((val1).x[i] & (val2).x[i]) != (val3).x[i]) { \
                        ret = 0; \
                        break; \
                } \
        } \
        ret; \
})

#define map_word_bitsset(map, val1, val2) \
({ \
        int i, ret = 0; \
        for (i = 0; i < map_words(map); i++) { \
                if ((val1).x[i] & (val2).x[i]) { \
                        ret = 1; \
                        break; \
                } \
        } \
        ret; \
})

static inline map_word map_word_load(struct map_info *map, const void *ptr)
{
        map_word r;

        if (map_bankwidth_is_1(map))
                r.x[0] = *(unsigned char *)ptr;
        else if (map_bankwidth_is_2(map))
                r.x[0] = get_unaligned((uint16_t *)ptr);
        else if (map_bankwidth_is_4(map))
                r.x[0] = get_unaligned((uint32_t *)ptr);
#if BITS_PER_LONG >= 64
        else if (map_bankwidth_is_8(map))
                r.x[0] = get_unaligned((uint64_t *)ptr);
#endif
        else if (map_bankwidth_is_large(map))
                memcpy(r.x, ptr, map->bankwidth);
        else
                BUG();

        return r;
}

static inline map_word map_word_load_partial(struct map_info *map, map_word orig,
                                             const unsigned char *buf, int start, int len)
{
        int i;

        if (map_bankwidth_is_large(map)) {
                char *dest = (char *)&orig;

                memcpy(dest + start, buf, len);
        } else {
                for (i = start; i < start + len; i++) {
                        int bitpos;

#ifdef __LITTLE_ENDIAN
                        bitpos = i * 8;
#else /* __BIG_ENDIAN */
                        bitpos = (map_bankwidth(map) - 1 - i) * 8;
#endif
                        orig.x[0] &= ~(0xffUL << bitpos);
                        orig.x[0] |= (unsigned long)buf[i - start] << bitpos;
                }
        }
        return orig;
}

#if BITS_PER_LONG < 64
#define MAP_FF_LIMIT 4
#else
#define MAP_FF_LIMIT 8
#endif

static inline map_word map_word_ff(struct map_info *map)
{
        map_word r;
        int i;

        if (map_bankwidth(map) < MAP_FF_LIMIT) {
                int bw = 8 * map_bankwidth(map);

                r.x[0] = (1UL << bw) - 1;
        } else {
                for (i = 0; i < map_words(map); i++)
                        r.x[i] = ~0UL;
        }
        return r;
}
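
/*
 * Illustrative sketch, not part of the upstream header (the MTD_MAP_DOC_EXAMPLES
 * guard is hypothetical and never defined): a chip driver can combine
 * map_word_load(), map_word_ff() and map_word_equal() to test whether a buffer
 * it is about to program is still in the erased (all 0xFF) state, one
 * bankwidth-sized chunk at a time.
 */
#ifdef MTD_MAP_DOC_EXAMPLES
static inline int example_buf_is_erased(struct map_info *map,
                                        const unsigned char *buf, size_t len)
{
        map_word ff = map_word_ff(map);
        size_t ofs;

        /* assumes len is a multiple of the bankwidth, purely for brevity */
        for (ofs = 0; ofs < len; ofs += map_bankwidth(map)) {
                map_word datum = map_word_load(map, buf + ofs);

                if (!map_word_equal(map, datum, ff))
                        return 0;
        }
        return 1;
}
#endif /* MTD_MAP_DOC_EXAMPLES */
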
static inline map_word inline_map_read(struct map_info *map, unsigned long ofs)
{
        map_word r;

        if (map_bankwidth_is_1(map))
                r.x[0] = __raw_readb(map->virt + ofs);
        else if (map_bankwidth_is_2(map))
                r.x[0] = __raw_readw(map->virt + ofs);
        else if (map_bankwidth_is_4(map))
                r.x[0] = __raw_readl(map->virt + ofs);
#if BITS_PER_LONG >= 64
        else if (map_bankwidth_is_8(map))
                r.x[0] = __raw_readq(map->virt + ofs);
#endif
        else if (map_bankwidth_is_large(map))
                memcpy_fromio(r.x, map->virt + ofs, map->bankwidth);
        else
                BUG();

        return r;
}

static inline void inline_map_write(struct map_info *map, const map_word datum, unsigned long ofs)
{
        if (map_bankwidth_is_1(map))
                __raw_writeb(datum.x[0], map->virt + ofs);
        else if (map_bankwidth_is_2(map))
                __raw_writew(datum.x[0], map->virt + ofs);
        else if (map_bankwidth_is_4(map))
                __raw_writel(datum.x[0], map->virt + ofs);
#if BITS_PER_LONG >= 64
        else if (map_bankwidth_is_8(map))
                __raw_writeq(datum.x[0], map->virt + ofs);
#endif
        else if (map_bankwidth_is_large(map))
                memcpy_toio(map->virt + ofs, datum.x, map->bankwidth);
        else
                BUG();
        mb();
}

static inline void inline_map_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len)
{
        if (map->cached)
                memcpy(to, (char *)map->cached + from, len);
        else
                memcpy_fromio(to, map->virt + from, len);
}

static inline void inline_map_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len)
{
        memcpy_toio(map->virt + to, from, len);
}

#ifdef CONFIG_MTD_COMPLEX_MAPPINGS
#define map_read(map, ofs) (map)->read(map, ofs)
#define map_copy_from(map, to, from, len) (map)->copy_from(map, to, from, len)
#define map_write(map, datum, ofs) (map)->write(map, datum, ofs)
#define map_copy_to(map, to, from, len) (map)->copy_to(map, to, from, len)

extern void simple_map_init(struct map_info *);
#define map_is_linear(map) (map->phys != NO_XIP)

#else
#define map_read(map, ofs) inline_map_read(map, ofs)
#define map_copy_from(map, to, from, len) inline_map_copy_from(map, to, from, len)
#define map_write(map, datum, ofs) inline_map_write(map, datum, ofs)
#define map_copy_to(map, to, from, len) inline_map_copy_to(map, to, from, len)

#define simple_map_init(map) BUG_ON(!map_bankwidth_supported((map)->bankwidth))
#define map_is_linear(map) ({ (void)(map); 1; })

#endif /* !CONFIG_MTD_COMPLEX_MAPPINGS */
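
/*
 * Illustrative sketch, not part of the upstream header (MTD_MAP_DOC_EXAMPLES is
 * a hypothetical guard that is never defined): after issuing a program command,
 * CFI-style chip drivers poll the device through map_read() until the datum
 * read back matches what was written, or a timeout expires. The busy-wait
 * below is a simplified stand-in for that idea; real drivers use the
 * cfi_cmdset_000x state machines with proper timeouts and sleeping.
 */
#ifdef MTD_MAP_DOC_EXAMPLES
#include <linux/errno.h>

static inline int example_wait_for_datum(struct map_info *map, unsigned long ofs,
                                         map_word expected, unsigned long max_polls)
{
        while (max_polls--) {
                map_word cur = map_read(map, ofs);

                if (map_word_equal(map, cur, expected))
                        return 0;
        }
        return -ETIMEDOUT;
}
#endif /* MTD_MAP_DOC_EXAMPLES */
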
#endif /* __LINUX_MTD_MAP_H__ */