1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Based on m25p80.c, by Mike Lavender (mike@steroidmicros.com), with 4 * influence from lart.c (Abraham Van Der Merwe) and mtd_dataflash.c 5 * 6 * Copyright (C) 2005, Intec Automation Inc. 7 * Copyright (C) 2014, Freescale Semiconductor, Inc. 8 */ 9 10 #include <linux/cleanup.h> 11 #include <linux/delay.h> 12 #include <linux/device.h> 13 #include <linux/err.h> 14 #include <linux/errno.h> 15 #include <linux/math64.h> 16 #include <linux/module.h> 17 #include <linux/mtd/mtd.h> 18 #include <linux/mtd/spi-nor.h> 19 #include <linux/mutex.h> 20 #include <linux/of.h> 21 #include <linux/regulator/consumer.h> 22 #include <linux/sched/task_stack.h> 23 #include <linux/sizes.h> 24 #include <linux/slab.h> 25 #include <linux/spi/flash.h> 26 27 #include "core.h" 28 29 /* Define max times to check status register before we give up. */ 30 31 /* 32 * For everything but full-chip erase; probably could be much smaller, but kept 33 * around for safety for now 34 */ 35 #define DEFAULT_READY_WAIT_JIFFIES (40UL * HZ) 36 37 /* 38 * For full-chip erase, calibrated to a 2MB flash (M25P16); should be scaled up 39 * for larger flash 40 */ 41 #define CHIP_ERASE_2MB_READY_WAIT_JIFFIES (40UL * HZ) 42 43 #define SPI_NOR_MAX_ADDR_NBYTES 4 44 45 #define SPI_NOR_SRST_SLEEP_MIN 200 46 #define SPI_NOR_SRST_SLEEP_MAX 400 47 48 /** 49 * spi_nor_get_cmd_ext() - Get the command opcode extension based on the 50 * extension type. 51 * @nor: pointer to a 'struct spi_nor' 52 * @op: pointer to the 'struct spi_mem_op' whose properties 53 * need to be initialized. 54 * 55 * Right now, only "repeat" and "invert" are supported. 56 * 57 * Return: The opcode extension. 
 */
static u8 spi_nor_get_cmd_ext(const struct spi_nor *nor,
			      const struct spi_mem_op *op)
{
	switch (nor->cmd_ext_type) {
	case SPI_NOR_EXT_INVERT:
		/* Extension byte is the bitwise complement of the opcode. */
		return ~op->cmd.opcode;

	case SPI_NOR_EXT_REPEAT:
		/* Extension byte repeats the opcode itself. */
		return op->cmd.opcode;

	default:
		dev_err(nor->dev, "Unknown command extension type\n");
		return 0;
	}
}

/**
 * spi_nor_spimem_setup_op() - Set up common properties of a spi-mem op.
 * @nor:	pointer to a 'struct spi_nor'
 * @op:		pointer to the 'struct spi_mem_op' whose properties
 *		need to be initialized.
 * @proto:	the protocol from which the properties need to be set.
 */
void spi_nor_spimem_setup_op(const struct spi_nor *nor,
			     struct spi_mem_op *op,
			     const enum spi_nor_protocol proto)
{
	u8 ext;

	op->cmd.buswidth = spi_nor_get_protocol_inst_nbits(proto);

	if (op->addr.nbytes)
		op->addr.buswidth = spi_nor_get_protocol_addr_nbits(proto);

	/* Dummy cycles are clocked out on the address lines. */
	if (op->dummy.nbytes)
		op->dummy.buswidth = spi_nor_get_protocol_addr_nbits(proto);

	if (op->data.nbytes)
		op->data.buswidth = spi_nor_get_protocol_data_nbits(proto);

	if (spi_nor_protocol_is_dtr(proto)) {
		/*
		 * SPIMEM supports mixed DTR modes, but right now we can only
		 * have all phases either DTR or STR. IOW, SPIMEM can have
		 * something like 4S-4D-4D, but SPI NOR can't. So, set all 4
		 * phases to either DTR or STR.
		 */
		op->cmd.dtr = true;
		op->addr.dtr = true;
		op->dummy.dtr = true;
		op->data.dtr = true;

		/* 2 bytes per clock cycle in DTR mode. */
		op->dummy.nbytes *= 2;

		/*
		 * Octal DTR commands are two bytes wide: the opcode followed
		 * by its extension (repeated or inverted opcode).
		 */
		ext = spi_nor_get_cmd_ext(nor, op);
		op->cmd.opcode = (op->cmd.opcode << 8) | ext;
		op->cmd.nbytes = 2;
	}

	if (proto == SNOR_PROTO_8_8_8_DTR && nor->flags & SNOR_F_SWAP16)
		op->data.swap16 = true;
}

/**
 * spi_nor_spimem_bounce() - check if a bounce buffer is needed for the data
 *                           transfer
 * @nor:	pointer to 'struct spi_nor'
 * @op:		pointer to 'struct spi_mem_op' template for transfer
 *
 * If we have to use the bounce buffer, the data field in @op will be updated.
 *
 * Return: true if the bounce buffer is needed, false if not
 */
static bool spi_nor_spimem_bounce(struct spi_nor *nor, struct spi_mem_op *op)
{
	/* op->data.buf.in occupies the same memory as op->data.buf.out */
	if (object_is_on_stack(op->data.buf.in) ||
	    !virt_addr_valid(op->data.buf.in)) {
		/*
		 * Buffer is not DMA-able: clamp the transfer to the bounce
		 * buffer size and redirect the data phase through it.
		 */
		if (op->data.nbytes > nor->bouncebuf_size)
			op->data.nbytes = nor->bouncebuf_size;
		op->data.buf.in = nor->bouncebuf;
		return true;
	}

	return false;
}

/**
 * spi_nor_spimem_exec_op() - execute a memory operation
 * @nor:	pointer to 'struct spi_nor'
 * @op:		pointer to 'struct spi_mem_op' template for transfer
 *
 * Return: 0 on success, -error otherwise.
 */
static int spi_nor_spimem_exec_op(struct spi_nor *nor, struct spi_mem_op *op)
{
	int error;

	/* Let the controller shrink the transfer to what it can handle. */
	error = spi_mem_adjust_op_size(nor->spimem, op);
	if (error)
		return error;

	return spi_mem_exec_op(nor->spimem, op);
}

/* Register read via the legacy controller_ops; DTR is spimem-only. */
int spi_nor_controller_ops_read_reg(struct spi_nor *nor, u8 opcode,
				    u8 *buf, size_t len)
{
	if (spi_nor_protocol_is_dtr(nor->reg_proto))
		return -EOPNOTSUPP;

	return nor->controller_ops->read_reg(nor, opcode, buf, len);
}

/* Register write via the legacy controller_ops; DTR is spimem-only. */
int spi_nor_controller_ops_write_reg(struct spi_nor *nor, u8 opcode,
				     const u8 *buf, size_t len)
{
	if (spi_nor_protocol_is_dtr(nor->reg_proto))
		return -EOPNOTSUPP;

	return nor->controller_ops->erase ? 0 : 0, /* see below */
	       nor->controller_ops->write_reg(nor, opcode, buf, len);
}

static int spi_nor_controller_ops_erase(struct spi_nor *nor, loff_t offs)
{
	if (spi_nor_protocol_is_dtr(nor->reg_proto))
		return -EOPNOTSUPP;

	return nor->controller_ops->erase(nor, offs);
}

/**
 * spi_nor_spimem_read_data() - read data from flash's memory region via
 *                              spi-mem
 * @nor:	pointer to 'struct spi_nor'
 * @from:	offset to read from
 * @len:	number of bytes to read
 * @buf:	pointer to dst buffer
 *
 * Return: number of bytes read successfully, -errno otherwise
 */
static ssize_t spi_nor_spimem_read_data(struct spi_nor *nor, loff_t from,
					size_t len, u8 *buf)
{
	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 0),
			   SPI_MEM_OP_ADDR(nor->addr_nbytes, from, 0),
			   SPI_MEM_OP_DUMMY(nor->read_dummy, 0),
			   SPI_MEM_OP_DATA_IN(len, buf, 0));
	bool usebouncebuf;
	ssize_t nbytes;
	int error;

	spi_nor_spimem_setup_op(nor, &op, nor->read_proto);

	/* convert the dummy cycles to the number of bytes */
	op.dummy.nbytes = (nor->read_dummy * op.dummy.buswidth) / 8;
	if (spi_nor_protocol_is_dtr(nor->read_proto))
		op.dummy.nbytes *= 2;

	usebouncebuf = spi_nor_spimem_bounce(nor, &op);

	if (nor->dirmap.rdesc) {
		/* Direct-mapped read path, if the controller provides one. */
		nbytes = spi_mem_dirmap_read(nor->dirmap.rdesc, op.addr.val,
					     op.data.nbytes, op.data.buf.in);
	} else {
		error = spi_nor_spimem_exec_op(nor, &op);
		if (error)
			return error;
		nbytes = op.data.nbytes;
	}

	/* Copy out of the bounce buffer into the caller's buffer. */
	if (usebouncebuf && nbytes > 0)
		memcpy(buf, op.data.buf.in, nbytes);

	return nbytes;
}

/**
 * spi_nor_read_data() - read data from flash memory
 * @nor:	pointer to 'struct spi_nor'
 * @from:	offset to read from
 * @len:	number of bytes to read
 * @buf:	pointer to dst buffer
 *
 * Return: number of bytes read successfully, -errno otherwise
 */
ssize_t spi_nor_read_data(struct spi_nor *nor, loff_t from, size_t len, u8 *buf)
{
	if (nor->spimem)
		return spi_nor_spimem_read_data(nor, from, len, buf);

	return nor->controller_ops->read(nor, from, len, buf);
}

/**
 * spi_nor_spimem_write_data() - write data to flash memory via
 *                               spi-mem
 * @nor:	pointer to 'struct spi_nor'
 * @to:		offset to write to
 * @len:	number of bytes to write
 * @buf:	pointer to src buffer
 *
 * Return: number of bytes written successfully, -errno otherwise
 */
static ssize_t spi_nor_spimem_write_data(struct spi_nor *nor, loff_t to,
					 size_t len, const u8 *buf)
{
	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 0),
			   SPI_MEM_OP_ADDR(nor->addr_nbytes, to, 0),
			   SPI_MEM_OP_NO_DUMMY,
			   SPI_MEM_OP_DATA_OUT(len, buf, 0));
	ssize_t nbytes;
	int error;

	/* SST AAI programming sends the address only on the first cycle. */
	if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
		op.addr.nbytes = 0;

	spi_nor_spimem_setup_op(nor, &op, nor->write_proto);

	if (spi_nor_spimem_bounce(nor, &op))
		memcpy(nor->bouncebuf, buf, op.data.nbytes);

	if (nor->dirmap.wdesc) {
		nbytes = spi_mem_dirmap_write(nor->dirmap.wdesc, op.addr.val,
					      op.data.nbytes, op.data.buf.out);
	} else {
		error = spi_nor_spimem_exec_op(nor, &op);
		if (error)
			return error;
		nbytes = op.data.nbytes;
	}

	return nbytes;
}

/**
 * spi_nor_write_data() - write data to flash memory
 * @nor:	pointer to 'struct spi_nor'
 * @to:		offset to write to
 * @len:	number of bytes to write
 * @buf:	pointer to src buffer
 *
 * Return: number of bytes written successfully, -errno otherwise
 */
ssize_t spi_nor_write_data(struct spi_nor *nor, loff_t to, size_t len,
			   const u8 *buf)
{
	if (nor->spimem)
		return spi_nor_spimem_write_data(nor, to, len, buf);

	return nor->controller_ops->write(nor, to, len, buf);
}

/**
 * spi_nor_read_any_reg() - read any register from flash memory, nonvolatile or
 *			    volatile.
 * @nor:	pointer to 'struct spi_nor'.
 * @op:		SPI memory operation. op->data.buf must be DMA-able.
 * @proto:	SPI protocol to use for the register operation.
 *
 * Return: zero on success, -errno otherwise
 */
int spi_nor_read_any_reg(struct spi_nor *nor, struct spi_mem_op *op,
			 enum spi_nor_protocol proto)
{
	if (!nor->spimem)
		return -EOPNOTSUPP;

	spi_nor_spimem_setup_op(nor, op, proto);
	return spi_nor_spimem_exec_op(nor, op);
}

/**
 * spi_nor_write_any_volatile_reg() - write any volatile register to flash
 *				      memory.
 * @nor:	pointer to 'struct spi_nor'
 * @op:		SPI memory operation. op->data.buf must be DMA-able.
 * @proto:	SPI protocol to use for the register operation.
 *
 * Writing volatile registers are instant according to some manufacturers
 * (Cypress, Micron) and do not need any status polling.
 *
 * Return: zero on success, -errno otherwise
 */
int spi_nor_write_any_volatile_reg(struct spi_nor *nor, struct spi_mem_op *op,
				   enum spi_nor_protocol proto)
{
	int ret;

	if (!nor->spimem)
		return -EOPNOTSUPP;

	/* WEL must be set even for volatile register writes. */
	ret = spi_nor_write_enable(nor);
	if (ret)
		return ret;
	spi_nor_spimem_setup_op(nor, op, proto);
	return spi_nor_spimem_exec_op(nor, op);
}

/**
 * spi_nor_write_enable() - Set write enable latch with Write Enable command.
 * @nor:	pointer to 'struct spi_nor'.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_write_enable(struct spi_nor *nor)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op = SPI_NOR_WREN_OP;

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WREN,
						       NULL, 0);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d on Write Enable\n", ret);

	return ret;
}

/**
 * spi_nor_write_disable() - Send Write Disable instruction to the chip.
 * @nor:	pointer to 'struct spi_nor'.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_write_disable(struct spi_nor *nor)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op = SPI_NOR_WRDI_OP;

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WRDI,
						       NULL, 0);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d on Write Disable\n", ret);

	return ret;
}

/**
 * spi_nor_read_id() - Read the JEDEC ID.
 * @nor:	pointer to 'struct spi_nor'.
 * @naddr:	number of address bytes to send. Can be zero if the operation
 *		does not need to send an address.
 * @ndummy:	number of dummy bytes to send after an opcode or address. Can
 *		be zero if the operation does not require dummy bytes.
 * @id:		pointer to a DMA-able buffer where the value of the JEDEC ID
 *		will be written.
 * @proto:	the SPI protocol for register operation.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_read_id(struct spi_nor *nor, u8 naddr, u8 ndummy, u8 *id,
		    enum spi_nor_protocol proto)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_NOR_READID_OP(naddr, ndummy, id, SPI_NOR_MAX_ID_LEN);

		spi_nor_spimem_setup_op(nor, &op, proto);
		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDID, id,
						    SPI_NOR_MAX_ID_LEN);
	}
	return ret;
}

/**
 * spi_nor_read_sr() - Read the Status Register.
 * @nor:	pointer to 'struct spi_nor'.
 * @sr:		pointer to a DMA-able buffer where the value of the
 *		Status Register will be written. Should be at least 2 bytes.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_read_sr(struct spi_nor *nor, u8 *sr)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op = SPI_NOR_RDSR_OP(sr);

		if (nor->reg_proto == SNOR_PROTO_8_8_8_DTR) {
			op.addr.nbytes = nor->params->rdsr_addr_nbytes;
			op.dummy.nbytes = nor->params->rdsr_dummy;
			/*
			 * We don't want to read only one byte in DTR mode. So,
			 * read 2 and then discard the second byte.
			 */
			op.data.nbytes = 2;
		}

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDSR, sr,
						      1);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d reading SR\n", ret);

	return ret;
}

/**
 * spi_nor_read_cr() - Read the Configuration Register using the
 *                     SPINOR_OP_RDCR (35h) command.
 * @nor:	pointer to 'struct spi_nor'
 * @cr:		pointer to a DMA-able buffer where the value of the
 *		Configuration Register will be written.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_read_cr(struct spi_nor *nor, u8 *cr)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op = SPI_NOR_RDCR_OP(cr);

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDCR, cr,
						      1);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d reading CR\n", ret);

	return ret;
}

/**
 * spi_nor_set_4byte_addr_mode_en4b_ex4b() - Enter/Exit 4-byte address mode
 *			using SPINOR_OP_EN4B/SPINOR_OP_EX4B. Typically used by
 *			Winbond and Macronix.
 * @nor:	pointer to 'struct spi_nor'.
 * @enable:	true to enter the 4-byte address mode, false to exit the 4-byte
 *		address mode.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_set_4byte_addr_mode_en4b_ex4b(struct spi_nor *nor, bool enable)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op = SPI_NOR_EN4B_EX4B_OP(enable);

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor,
						       enable ? SPINOR_OP_EN4B :
								SPINOR_OP_EX4B,
						       NULL, 0);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret);

	return ret;
}

/**
 * spi_nor_set_4byte_addr_mode_wren_en4b_ex4b() - Set 4-byte address mode using
 * SPINOR_OP_WREN followed by SPINOR_OP_EN4B or SPINOR_OP_EX4B. Typically used
 * by ST and Micron flashes.
 * @nor:	pointer to 'struct spi_nor'.
 * @enable:	true to enter the 4-byte address mode, false to exit the 4-byte
 *		address mode.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_set_4byte_addr_mode_wren_en4b_ex4b(struct spi_nor *nor, bool enable)
{
	int ret;

	ret = spi_nor_write_enable(nor);
	if (ret)
		return ret;

	ret = spi_nor_set_4byte_addr_mode_en4b_ex4b(nor, enable);
	if (ret)
		return ret;

	return spi_nor_write_disable(nor);
}

/**
 * spi_nor_set_4byte_addr_mode_brwr() - Set 4-byte address mode using
 *			SPINOR_OP_BRWR. Typically used by Spansion flashes.
 * @nor:	pointer to 'struct spi_nor'.
 * @enable:	true to enter the 4-byte address mode, false to exit the 4-byte
 *		address mode.
 *
 * 8-bit volatile bank register used to define A[30:A24] bits. MSB (bit[7]) is
 * used to enable/disable 4-byte address mode. When MSB is set to '1', 4-byte
 * address mode is active and A[30:24] bits are don't care. Write instruction is
 * SPINOR_OP_BRWR(17h) with 1 byte of data.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_set_4byte_addr_mode_brwr(struct spi_nor *nor, bool enable)
{
	int ret;

	nor->bouncebuf[0] = enable << 7;

	if (nor->spimem) {
		struct spi_mem_op op = SPI_NOR_BRWR_OP(nor->bouncebuf);

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_BRWR,
						       nor->bouncebuf, 1);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret);

	return ret;
}

/**
 * spi_nor_sr_ready() - Query the Status Register to see if the flash is ready
 * for new commands.
 * @nor:	pointer to 'struct spi_nor'.
 *
 * Return: 1 if ready, 0 if not ready, -errno on errors.
 */
int spi_nor_sr_ready(struct spi_nor *nor)
{
	int ret;

	ret = spi_nor_read_sr(nor, nor->bouncebuf);
	if (ret)
		return ret;

	/* Ready once the Write-In-Progress bit clears. */
	return !(nor->bouncebuf[0] & SR_WIP);
}

/**
 * spi_nor_use_parallel_locking() - Checks if RWW locking scheme shall be used
 * @nor:	pointer to 'struct spi_nor'.
 *
 * Return: true if parallel locking is enabled, false otherwise.
 */
static bool spi_nor_use_parallel_locking(struct spi_nor *nor)
{
	return nor->flags & SNOR_F_RWW;
}

/* Locking helpers for status read operations */
static int spi_nor_rww_start_rdst(struct spi_nor *nor)
{
	struct spi_nor_rww *rww = &nor->rww;

	/* Scope-based lock: released automatically on every return path. */
	guard(mutex)(&nor->lock);

	if (rww->ongoing_io || rww->ongoing_rd)
		return -EAGAIN;

	rww->ongoing_io = true;
	rww->ongoing_rd = true;

	return 0;
}

static void spi_nor_rww_end_rdst(struct spi_nor *nor)
{
	struct spi_nor_rww *rww = &nor->rww;

	guard(mutex)(&nor->lock);

	rww->ongoing_io = false;
	rww->ongoing_rd = false;
}

static int spi_nor_lock_rdst(struct spi_nor *nor)
{
	if (spi_nor_use_parallel_locking(nor))
		return spi_nor_rww_start_rdst(nor);

	return 0;
}

static void spi_nor_unlock_rdst(struct spi_nor *nor)
{
	if (spi_nor_use_parallel_locking(nor)) {
		spi_nor_rww_end_rdst(nor);
		wake_up(&nor->rww.wait);
	}
}

/**
 * spi_nor_ready() - Query the flash to see if it is ready for new commands.
 * @nor:	pointer to 'struct spi_nor'.
 *
 * Return: 1 if ready, 0 if not ready, -errno on errors.
 */
static int spi_nor_ready(struct spi_nor *nor)
{
	int ret;

	/* Busy RWW state counts as "not ready"; the caller will poll again. */
	ret = spi_nor_lock_rdst(nor);
	if (ret)
		return 0;

	/* Flashes might override the standard routine. */
	if (nor->params->ready)
		ret = nor->params->ready(nor);
	else
		ret = spi_nor_sr_ready(nor);

	spi_nor_unlock_rdst(nor);

	return ret;
}

/**
 * spi_nor_wait_till_ready_with_timeout() - Service routine to read the
 * Status Register until ready, or timeout occurs.
 * @nor:		pointer to "struct spi_nor".
 * @timeout_jiffies:	jiffies to wait until timeout.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_wait_till_ready_with_timeout(struct spi_nor *nor,
						unsigned long timeout_jiffies)
{
	unsigned long deadline;
	int timeout = 0, ret;

	deadline = jiffies + timeout_jiffies;

	while (!timeout) {
		/* One last poll is done even after the deadline passes. */
		if (time_after_eq(jiffies, deadline))
			timeout = 1;

		ret = spi_nor_ready(nor);
		if (ret < 0)
			return ret;
		if (ret)
			return 0;

		cond_resched();
	}

	dev_dbg(nor->dev, "flash operation timed out\n");

	return -ETIMEDOUT;
}

/**
 * spi_nor_wait_till_ready() - Wait for a predefined amount of time for the
 * flash to be ready, or timeout occurs.
 * @nor:	pointer to "struct spi_nor".
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_wait_till_ready(struct spi_nor *nor)
{
	return spi_nor_wait_till_ready_with_timeout(nor,
						    DEFAULT_READY_WAIT_JIFFIES);
}

/**
 * spi_nor_global_block_unlock() - Unlock Global Block Protection.
 * @nor:	pointer to 'struct spi_nor'.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_global_block_unlock(struct spi_nor *nor)
{
	int ret;

	ret = spi_nor_write_enable(nor);
	if (ret)
		return ret;

	if (nor->spimem) {
		struct spi_mem_op op = SPI_NOR_GBULK_OP;

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_GBULK,
						       NULL, 0);
	}

	if (ret) {
		dev_dbg(nor->dev, "error %d on Global Block Unlock\n", ret);
		return ret;
	}

	return spi_nor_wait_till_ready(nor);
}

/**
 * spi_nor_write_sr() - Write the Status Register.
 * @nor:	pointer to 'struct spi_nor'.
 * @sr:		pointer to DMA-able buffer to write to the Status Register.
 * @len:	number of bytes to write to the Status Register.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_write_sr(struct spi_nor *nor, const u8 *sr, size_t len)
{
	int ret;

	ret = spi_nor_write_enable(nor);
	if (ret)
		return ret;

	if (nor->spimem) {
		struct spi_mem_op op = SPI_NOR_WRSR_OP(sr, len);

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WRSR, sr,
						       len);
	}

	if (ret) {
		dev_dbg(nor->dev, "error %d writing SR\n", ret);
		return ret;
	}

	return spi_nor_wait_till_ready(nor);
}

/**
 * spi_nor_write_sr1_and_check() - Write one byte to the Status Register 1 and
 * ensure that the byte written match the received value.
 * @nor:	pointer to a 'struct spi_nor'.
 * @sr1:	byte value to be written to the Status Register.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_write_sr1_and_check(struct spi_nor *nor, u8 sr1)
{
	int ret;

	nor->bouncebuf[0] = sr1;

	ret = spi_nor_write_sr(nor, nor->bouncebuf, 1);
	if (ret)
		return ret;

	/* Read back to detect flashes that silently ignore the write. */
	ret = spi_nor_read_sr(nor, nor->bouncebuf);
	if (ret)
		return ret;

	if (nor->bouncebuf[0] != sr1) {
		dev_dbg(nor->dev, "SR1: read back test failed\n");
		return -EIO;
	}

	return 0;
}

/**
 * spi_nor_write_16bit_sr_and_check() - Write the Status Register 1 and the
 * Status Register 2 in one shot. Ensure that the byte written in the Status
 * Register 1 match the received value, and that the 16-bit Write did not
 * affect what was already in the Status Register 2.
 * @nor:	pointer to a 'struct spi_nor'.
 * @sr1:	byte value to be written to the Status Register 1.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_write_16bit_sr_and_check(struct spi_nor *nor, u8 sr1)
{
	int ret;
	u8 *sr_cr = nor->bouncebuf;
	u8 cr_written;

	/* Make sure we don't overwrite the contents of Status Register 2. */
	if (!(nor->flags & SNOR_F_NO_READ_CR)) {
		ret = spi_nor_read_cr(nor, &sr_cr[1]);
		if (ret)
			return ret;
	} else if (spi_nor_get_protocol_width(nor->read_proto) == 4 &&
		   spi_nor_get_protocol_width(nor->write_proto) == 4 &&
		   nor->params->quad_enable) {
		/*
		 * If the Status Register 2 Read command (35h) is not
		 * supported, we should at least be sure we don't
		 * change the value of the SR2 Quad Enable bit.
		 *
		 * When the Quad Enable method is set and the buswidth is 4, we
		 * can safely assume that the value of the QE bit is one, as a
		 * consequence of the nor->params->quad_enable() call.
		 *
		 * According to the JESD216 revB standard, BFPT DWORDS[15],
		 * bits 22:20, the 16-bit Write Status (01h) command is
		 * available just for the cases in which the QE bit is
		 * described in SR2 at BIT(1).
		 */
		sr_cr[1] = SR2_QUAD_EN_BIT1;
	} else {
		sr_cr[1] = 0;
	}

	sr_cr[0] = sr1;

	ret = spi_nor_write_sr(nor, sr_cr, 2);
	if (ret)
		return ret;

	ret = spi_nor_read_sr(nor, sr_cr);
	if (ret)
		return ret;

	if (sr1 != sr_cr[0]) {
		dev_dbg(nor->dev, "SR: Read back test failed\n");
		return -EIO;
	}

	if (nor->flags & SNOR_F_NO_READ_CR)
		return 0;

	cr_written = sr_cr[1];

	ret = spi_nor_read_cr(nor, &sr_cr[1]);
	if (ret)
		return ret;

	if (cr_written != sr_cr[1]) {
		dev_dbg(nor->dev, "CR: read back test failed\n");
		return -EIO;
	}

	return 0;
}

/**
 * spi_nor_write_16bit_cr_and_check() - Write the Status Register 1 and the
 * Configuration Register in one shot. Ensure that the byte written in the
 * Configuration Register match the received value, and that the 16-bit Write
 * did not affect what was already in the Status Register 1.
 * @nor:	pointer to a 'struct spi_nor'.
 * @cr:		byte value to be written to the Configuration Register.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_write_16bit_cr_and_check(struct spi_nor *nor, u8 cr)
{
	int ret;
	u8 *sr_cr = nor->bouncebuf;
	u8 sr_written;

	/* Keep the current value of the Status Register 1. */
	ret = spi_nor_read_sr(nor, sr_cr);
	if (ret)
		return ret;

	sr_cr[1] = cr;

	ret = spi_nor_write_sr(nor, sr_cr, 2);
	if (ret)
		return ret;

	sr_written = sr_cr[0];

	ret = spi_nor_read_sr(nor, sr_cr);
	if (ret)
		return ret;

	if (sr_written != sr_cr[0]) {
		dev_dbg(nor->dev, "SR: Read back test failed\n");
		return -EIO;
	}

	if (nor->flags & SNOR_F_NO_READ_CR)
		return 0;

	ret = spi_nor_read_cr(nor, &sr_cr[1]);
	if (ret)
		return ret;

	if (cr != sr_cr[1]) {
		dev_dbg(nor->dev, "CR: read back test failed\n");
		return -EIO;
	}

	return 0;
}

/**
 * spi_nor_write_sr_and_check() - Write the Status Register 1 and ensure that
 * the byte written match the received value without affecting other bits in the
 * Status Register 1 and 2.
 * @nor:	pointer to a 'struct spi_nor'.
 * @sr1:	byte value to be written to the Status Register.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_write_sr_and_check(struct spi_nor *nor, u8 sr1)
{
	if (nor->flags & SNOR_F_HAS_16BIT_SR)
		return spi_nor_write_16bit_sr_and_check(nor, sr1);

	return spi_nor_write_sr1_and_check(nor, sr1);
}

/**
 * spi_nor_write_sr2() - Write the Status Register 2 using the
 * SPINOR_OP_WRSR2 (3eh) command.
 * @nor:	pointer to 'struct spi_nor'.
 * @sr2:	pointer to DMA-able buffer to write to the Status Register 2.
 *
 * Return: 0 on success, -errno otherwise.
1003 */ 1004 static int spi_nor_write_sr2(struct spi_nor *nor, const u8 *sr2) 1005 { 1006 int ret; 1007 1008 ret = spi_nor_write_enable(nor); 1009 if (ret) 1010 return ret; 1011 1012 if (nor->spimem) { 1013 struct spi_mem_op op = SPI_NOR_WRSR2_OP(sr2); 1014 1015 spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); 1016 1017 ret = spi_mem_exec_op(nor->spimem, &op); 1018 } else { 1019 ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WRSR2, 1020 sr2, 1); 1021 } 1022 1023 if (ret) { 1024 dev_dbg(nor->dev, "error %d writing SR2\n", ret); 1025 return ret; 1026 } 1027 1028 return spi_nor_wait_till_ready(nor); 1029 } 1030 1031 /** 1032 * spi_nor_read_sr2() - Read the Status Register 2 using the 1033 * SPINOR_OP_RDSR2 (3fh) command. 1034 * @nor: pointer to 'struct spi_nor'. 1035 * @sr2: pointer to DMA-able buffer where the value of the 1036 * Status Register 2 will be written. 1037 * 1038 * Return: 0 on success, -errno otherwise. 1039 */ 1040 static int spi_nor_read_sr2(struct spi_nor *nor, u8 *sr2) 1041 { 1042 int ret; 1043 1044 if (nor->spimem) { 1045 struct spi_mem_op op = SPI_NOR_RDSR2_OP(sr2); 1046 1047 spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); 1048 1049 ret = spi_mem_exec_op(nor->spimem, &op); 1050 } else { 1051 ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDSR2, sr2, 1052 1); 1053 } 1054 1055 if (ret) 1056 dev_dbg(nor->dev, "error %d reading SR2\n", ret); 1057 1058 return ret; 1059 } 1060 1061 /** 1062 * spi_nor_erase_die() - Erase the entire die. 1063 * @nor: pointer to 'struct spi_nor'. 1064 * @addr: address of the die. 1065 * @die_size: size of the die. 1066 * 1067 * Return: 0 on success, -errno otherwise. 
1068 */ 1069 static int spi_nor_erase_die(struct spi_nor *nor, loff_t addr, size_t die_size) 1070 { 1071 bool multi_die = nor->mtd.size != die_size; 1072 int ret; 1073 1074 dev_dbg(nor->dev, " %lldKiB\n", (long long)(die_size >> 10)); 1075 1076 if (nor->spimem) { 1077 struct spi_mem_op op = 1078 SPI_NOR_DIE_ERASE_OP(nor->params->die_erase_opcode, 1079 nor->addr_nbytes, addr, multi_die); 1080 1081 spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); 1082 1083 ret = spi_mem_exec_op(nor->spimem, &op); 1084 } else { 1085 if (multi_die) 1086 return -EOPNOTSUPP; 1087 1088 ret = spi_nor_controller_ops_write_reg(nor, 1089 SPINOR_OP_CHIP_ERASE, 1090 NULL, 0); 1091 } 1092 1093 if (ret) 1094 dev_dbg(nor->dev, "error %d erasing chip\n", ret); 1095 1096 return ret; 1097 } 1098 1099 static u8 spi_nor_convert_opcode(u8 opcode, const u8 table[][2], size_t size) 1100 { 1101 size_t i; 1102 1103 for (i = 0; i < size; i++) 1104 if (table[i][0] == opcode) 1105 return table[i][1]; 1106 1107 /* No conversion found, keep input op code. 
*/ 1108 return opcode; 1109 } 1110 1111 u8 spi_nor_convert_3to4_read(u8 opcode) 1112 { 1113 static const u8 spi_nor_3to4_read[][2] = { 1114 { SPINOR_OP_READ, SPINOR_OP_READ_4B }, 1115 { SPINOR_OP_READ_FAST, SPINOR_OP_READ_FAST_4B }, 1116 { SPINOR_OP_READ_1_1_2, SPINOR_OP_READ_1_1_2_4B }, 1117 { SPINOR_OP_READ_1_2_2, SPINOR_OP_READ_1_2_2_4B }, 1118 { SPINOR_OP_READ_1_1_4, SPINOR_OP_READ_1_1_4_4B }, 1119 { SPINOR_OP_READ_1_4_4, SPINOR_OP_READ_1_4_4_4B }, 1120 { SPINOR_OP_READ_1_1_8, SPINOR_OP_READ_1_1_8_4B }, 1121 { SPINOR_OP_READ_1_8_8, SPINOR_OP_READ_1_8_8_4B }, 1122 1123 { SPINOR_OP_READ_1_1_1_DTR, SPINOR_OP_READ_1_1_1_DTR_4B }, 1124 { SPINOR_OP_READ_1_2_2_DTR, SPINOR_OP_READ_1_2_2_DTR_4B }, 1125 { SPINOR_OP_READ_1_4_4_DTR, SPINOR_OP_READ_1_4_4_DTR_4B }, 1126 }; 1127 1128 return spi_nor_convert_opcode(opcode, spi_nor_3to4_read, 1129 ARRAY_SIZE(spi_nor_3to4_read)); 1130 } 1131 1132 static u8 spi_nor_convert_3to4_program(u8 opcode) 1133 { 1134 static const u8 spi_nor_3to4_program[][2] = { 1135 { SPINOR_OP_PP, SPINOR_OP_PP_4B }, 1136 { SPINOR_OP_PP_1_1_4, SPINOR_OP_PP_1_1_4_4B }, 1137 { SPINOR_OP_PP_1_4_4, SPINOR_OP_PP_1_4_4_4B }, 1138 { SPINOR_OP_PP_1_1_8, SPINOR_OP_PP_1_1_8_4B }, 1139 { SPINOR_OP_PP_1_8_8, SPINOR_OP_PP_1_8_8_4B }, 1140 }; 1141 1142 return spi_nor_convert_opcode(opcode, spi_nor_3to4_program, 1143 ARRAY_SIZE(spi_nor_3to4_program)); 1144 } 1145 1146 static u8 spi_nor_convert_3to4_erase(u8 opcode) 1147 { 1148 static const u8 spi_nor_3to4_erase[][2] = { 1149 { SPINOR_OP_BE_4K, SPINOR_OP_BE_4K_4B }, 1150 { SPINOR_OP_BE_32K, SPINOR_OP_BE_32K_4B }, 1151 { SPINOR_OP_SE, SPINOR_OP_SE_4B }, 1152 }; 1153 1154 return spi_nor_convert_opcode(opcode, spi_nor_3to4_erase, 1155 ARRAY_SIZE(spi_nor_3to4_erase)); 1156 } 1157 1158 static bool spi_nor_has_uniform_erase(const struct spi_nor *nor) 1159 { 1160 return !!nor->params->erase_map.uniform_region.erase_mask; 1161 } 1162 1163 static void spi_nor_set_4byte_opcodes(struct spi_nor *nor) 1164 { 1165 nor->read_opcode = 
		spi_nor_convert_3to4_read(nor->read_opcode);
	nor->program_opcode = spi_nor_convert_3to4_program(nor->program_opcode);
	nor->erase_opcode = spi_nor_convert_3to4_erase(nor->erase_opcode);

	/* Non-uniform maps carry per-region erase types; convert those too. */
	if (!spi_nor_has_uniform_erase(nor)) {
		struct spi_nor_erase_map *map = &nor->params->erase_map;
		struct spi_nor_erase_type *erase;
		int i;

		for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
			erase = &map->erase_type[i];
			erase->opcode =
				spi_nor_convert_3to4_erase(erase->opcode);
		}
	}
}

/* Call the controller's optional prepare hook, if any. */
static int spi_nor_prep(struct spi_nor *nor)
{
	int ret = 0;

	if (nor->controller_ops && nor->controller_ops->prepare)
		ret = nor->controller_ops->prepare(nor);

	return ret;
}

/* Call the controller's optional unprepare hook, if any. */
static void spi_nor_unprep(struct spi_nor *nor)
{
	if (nor->controller_ops && nor->controller_ops->unprepare)
		nor->controller_ops->unprepare(nor);
}

/*
 * Compute the first and last bank touched by the [start, start + len) range,
 * given a per-bank size of @bank_size.
 */
static void spi_nor_offset_to_banks(u64 bank_size, loff_t start, size_t len,
				    u8 *first, u8 *last)
{
	/* This is currently safe, the number of banks being very small */
	*first = DIV_ROUND_DOWN_ULL(start, bank_size);
	*last = DIV_ROUND_DOWN_ULL(start + len - 1, bank_size);
}

/* Generic helpers for internal locking and serialization */
static bool spi_nor_rww_start_io(struct spi_nor *nor)
{
	struct spi_nor_rww *rww = &nor->rww;

	guard(mutex)(&nor->lock);

	/* Only one register/IO operation may be in flight at a time. */
	if (rww->ongoing_io)
		return false;

	rww->ongoing_io = true;

	return true;
}

static void spi_nor_rww_end_io(struct spi_nor *nor)
{
	guard(mutex)(&nor->lock);
	nor->rww.ongoing_io = false;
}

static int spi_nor_lock_device(struct spi_nor *nor)
{
	if (!spi_nor_use_parallel_locking(nor))
		return 0;

	/* Killable wait so a fatal signal can interrupt a stuck flash. */
	return wait_event_killable(nor->rww.wait, spi_nor_rww_start_io(nor));
}

static void spi_nor_unlock_device(struct spi_nor *nor)
1236 { 1237 if (spi_nor_use_parallel_locking(nor)) { 1238 spi_nor_rww_end_io(nor); 1239 wake_up(&nor->rww.wait); 1240 } 1241 } 1242 1243 /* Generic helpers for internal locking and serialization */ 1244 static bool spi_nor_rww_start_exclusive(struct spi_nor *nor) 1245 { 1246 struct spi_nor_rww *rww = &nor->rww; 1247 1248 mutex_lock(&nor->lock); 1249 1250 if (rww->ongoing_io || rww->ongoing_rd || rww->ongoing_pe) 1251 return false; 1252 1253 rww->ongoing_io = true; 1254 rww->ongoing_rd = true; 1255 rww->ongoing_pe = true; 1256 1257 return true; 1258 } 1259 1260 static void spi_nor_rww_end_exclusive(struct spi_nor *nor) 1261 { 1262 struct spi_nor_rww *rww = &nor->rww; 1263 1264 guard(mutex)(&nor->lock); 1265 rww->ongoing_io = false; 1266 rww->ongoing_rd = false; 1267 rww->ongoing_pe = false; 1268 } 1269 1270 int spi_nor_prep_and_lock(struct spi_nor *nor) 1271 { 1272 int ret; 1273 1274 ret = spi_nor_prep(nor); 1275 if (ret) 1276 return ret; 1277 1278 if (!spi_nor_use_parallel_locking(nor)) 1279 mutex_lock(&nor->lock); 1280 else 1281 ret = wait_event_killable(nor->rww.wait, 1282 spi_nor_rww_start_exclusive(nor)); 1283 1284 return ret; 1285 } 1286 1287 void spi_nor_unlock_and_unprep(struct spi_nor *nor) 1288 { 1289 if (!spi_nor_use_parallel_locking(nor)) { 1290 mutex_unlock(&nor->lock); 1291 } else { 1292 spi_nor_rww_end_exclusive(nor); 1293 wake_up(&nor->rww.wait); 1294 } 1295 1296 spi_nor_unprep(nor); 1297 } 1298 1299 /* Internal locking helpers for program and erase operations */ 1300 static bool spi_nor_rww_start_pe(struct spi_nor *nor, loff_t start, size_t len) 1301 { 1302 struct spi_nor_rww *rww = &nor->rww; 1303 unsigned int used_banks = 0; 1304 u8 first, last; 1305 int bank; 1306 1307 guard(mutex)(&nor->lock); 1308 1309 if (rww->ongoing_io || rww->ongoing_rd || rww->ongoing_pe) 1310 return false; 1311 1312 spi_nor_offset_to_banks(nor->params->bank_size, start, len, &first, &last); 1313 for (bank = first; bank <= last; bank++) { 1314 if (rww->used_banks & 
BIT(bank)) 1315 return false; 1316 1317 used_banks |= BIT(bank); 1318 } 1319 1320 rww->used_banks |= used_banks; 1321 rww->ongoing_pe = true; 1322 1323 return true; 1324 } 1325 1326 static void spi_nor_rww_end_pe(struct spi_nor *nor, loff_t start, size_t len) 1327 { 1328 struct spi_nor_rww *rww = &nor->rww; 1329 u8 first, last; 1330 int bank; 1331 1332 guard(mutex)(&nor->lock); 1333 1334 spi_nor_offset_to_banks(nor->params->bank_size, start, len, &first, &last); 1335 for (bank = first; bank <= last; bank++) 1336 rww->used_banks &= ~BIT(bank); 1337 1338 rww->ongoing_pe = false; 1339 } 1340 1341 static int spi_nor_prep_and_lock_pe(struct spi_nor *nor, loff_t start, size_t len) 1342 { 1343 int ret; 1344 1345 ret = spi_nor_prep(nor); 1346 if (ret) 1347 return ret; 1348 1349 if (!spi_nor_use_parallel_locking(nor)) 1350 mutex_lock(&nor->lock); 1351 else 1352 ret = wait_event_killable(nor->rww.wait, 1353 spi_nor_rww_start_pe(nor, start, len)); 1354 1355 return ret; 1356 } 1357 1358 static void spi_nor_unlock_and_unprep_pe(struct spi_nor *nor, loff_t start, size_t len) 1359 { 1360 if (!spi_nor_use_parallel_locking(nor)) { 1361 mutex_unlock(&nor->lock); 1362 } else { 1363 spi_nor_rww_end_pe(nor, start, len); 1364 wake_up(&nor->rww.wait); 1365 } 1366 1367 spi_nor_unprep(nor); 1368 } 1369 1370 /* Internal locking helpers for read operations */ 1371 static bool spi_nor_rww_start_rd(struct spi_nor *nor, loff_t start, size_t len) 1372 { 1373 struct spi_nor_rww *rww = &nor->rww; 1374 unsigned int used_banks = 0; 1375 u8 first, last; 1376 int bank; 1377 1378 guard(mutex)(&nor->lock); 1379 1380 if (rww->ongoing_io || rww->ongoing_rd) 1381 return false; 1382 1383 spi_nor_offset_to_banks(nor->params->bank_size, start, len, &first, &last); 1384 for (bank = first; bank <= last; bank++) { 1385 if (rww->used_banks & BIT(bank)) 1386 return false; 1387 1388 used_banks |= BIT(bank); 1389 } 1390 1391 rww->used_banks |= used_banks; 1392 rww->ongoing_io = true; 1393 rww->ongoing_rd = true; 
	return true;
}

static void spi_nor_rww_end_rd(struct spi_nor *nor, loff_t start, size_t len)
{
	struct spi_nor_rww *rww = &nor->rww;
	u8 first, last;
	int bank;

	guard(mutex)(&nor->lock);

	/* Release every bank the read claimed in spi_nor_rww_start_rd(). */
	spi_nor_offset_to_banks(nor->params->bank_size, start, len, &first, &last);
	for (bank = first; bank <= last; bank++)
		nor->rww.used_banks &= ~BIT(bank);

	rww->ongoing_io = false;
	rww->ongoing_rd = false;
}

static int spi_nor_prep_and_lock_rd(struct spi_nor *nor, loff_t start, size_t len)
{
	int ret;

	ret = spi_nor_prep(nor);
	if (ret)
		return ret;

	if (!spi_nor_use_parallel_locking(nor))
		mutex_lock(&nor->lock);
	else
		ret = wait_event_killable(nor->rww.wait,
					  spi_nor_rww_start_rd(nor, start, len));

	return ret;
}

static void spi_nor_unlock_and_unprep_rd(struct spi_nor *nor, loff_t start, size_t len)
{
	if (!spi_nor_use_parallel_locking(nor)) {
		mutex_unlock(&nor->lock);
	} else {
		spi_nor_rww_end_rd(nor, start, len);
		wake_up(&nor->rww.wait);
	}

	spi_nor_unprep(nor);
}

/*
 * Initiate the erasure of a single sector
 */
int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
{
	int i;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_NOR_SECTOR_ERASE_OP(nor->erase_opcode,
						nor->addr_nbytes, addr);

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		return spi_mem_exec_op(nor->spimem, &op);
	} else if (nor->controller_ops->erase) {
		return spi_nor_controller_ops_erase(nor, addr);
	}

	/*
	 * Default implementation, if driver doesn't have a specialized HW
	 * control
	 */
	/* Serialize the address big-endian into the bounce buffer. */
	for (i = nor->addr_nbytes - 1; i >= 0; i--) {
		nor->bouncebuf[i] = addr & 0xff;
		addr >>= 8;
	}

	return spi_nor_controller_ops_write_reg(nor, nor->erase_opcode,
						nor->bouncebuf, nor->addr_nbytes);
}

/**
 * spi_nor_div_by_erase_size() - calculate remainder and update new dividend
 * @erase:	pointer to a structure that describes a SPI NOR erase type
 * @dividend:	dividend value
 * @remainder:	pointer to u32 remainder (will be updated)
 *
 * Return: the result of the division
 */
static u64 spi_nor_div_by_erase_size(const struct spi_nor_erase_type *erase,
				     u64 dividend, u32 *remainder)
{
	/* JEDEC JESD216B Standard imposes erase sizes to be power of 2. */
	*remainder = (u32)dividend & erase->size_mask;
	return dividend >> erase->size_shift;
}

/**
 * spi_nor_find_best_erase_type() - find the best erase type for the given
 *				    offset in the serial flash memory and the
 *				    number of bytes to erase. The region in
 *				    which the address fits is expected to be
 *				    provided.
 * @map:	the erase map of the SPI NOR
 * @region:	pointer to a structure that describes a SPI NOR erase region
 * @addr:	offset in the serial flash memory
 * @len:	number of bytes to erase
 *
 * Return: a pointer to the best fitted erase type, NULL otherwise.
 */
static const struct spi_nor_erase_type *
spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map,
			     const struct spi_nor_erase_region *region,
			     u64 addr, u32 len)
{
	const struct spi_nor_erase_type *erase;
	u32 rem;
	int i;

	/*
	 * Erase types are ordered by size, with the smallest erase type at
	 * index 0.
	 */
	for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
		/* Does the erase region support the tested erase type?
 */
		if (!(region->erase_mask & BIT(i)))
			continue;

		erase = &map->erase_type[i];
		if (!erase->size)
			continue;

		/* Alignment is not mandatory for overlaid regions */
		if (region->overlaid && region->size <= len)
			return erase;

		/* Don't erase more than what the user has asked for. */
		if (erase->size > len)
			continue;

		spi_nor_div_by_erase_size(erase, addr, &rem);
		if (!rem)
			return erase;
	}

	return NULL;
}

/**
 * spi_nor_init_erase_cmd() - initialize an erase command
 * @region:	pointer to a structure that describes a SPI NOR erase region
 * @erase:	pointer to a structure that describes a SPI NOR erase type
 *
 * Return: the pointer to the allocated erase command, ERR_PTR(-errno)
 *	   otherwise.
 */
static struct spi_nor_erase_command *
spi_nor_init_erase_cmd(const struct spi_nor_erase_region *region,
		       const struct spi_nor_erase_type *erase)
{
	struct spi_nor_erase_command *cmd;

	cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&cmd->list);
	cmd->opcode = erase->opcode;
	cmd->count = 1;

	/* An overlaid region is erased as a whole, whatever the erase size. */
	if (region->overlaid)
		cmd->size = region->size;
	else
		cmd->size = erase->size;

	return cmd;
}

/**
 * spi_nor_destroy_erase_cmd_list() - destroy erase command list
 * @erase_list:	list of erase commands
 */
static void spi_nor_destroy_erase_cmd_list(struct list_head *erase_list)
{
	struct spi_nor_erase_command *cmd, *next;

	list_for_each_entry_safe(cmd, next, erase_list, list) {
		list_del(&cmd->list);
		kfree(cmd);
	}
}

/**
 * spi_nor_init_erase_cmd_list() - initialize erase command list
 * @nor:	pointer to a 'struct spi_nor'
 * @erase_list:	list of erase commands to be executed once we validate that the
 *		erase can be performed
 * @addr:	offset in the serial flash memory
 * @len:	number of bytes to erase
 *
 * Builds the list of best fitted erase commands and verifies if the erase can
 * be performed.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_init_erase_cmd_list(struct spi_nor *nor,
				       struct list_head *erase_list,
				       u64 addr, u32 len)
{
	const struct spi_nor_erase_map *map = &nor->params->erase_map;
	const struct spi_nor_erase_type *erase, *prev_erase = NULL;
	struct spi_nor_erase_region *region;
	struct spi_nor_erase_command *cmd = NULL;
	u64 region_end;
	unsigned int i;
	int ret = -EINVAL;

	for (i = 0; i < map->n_regions && len; i++) {
		region = &map->regions[i];
		region_end = region->offset + region->size;

		while (len && addr >= region->offset && addr < region_end) {
			erase = spi_nor_find_best_erase_type(map, region, addr,
							     len);
			if (!erase)
				goto destroy_erase_cmd_list;

			/*
			 * Start a new command when the erase type changes;
			 * otherwise just repeat the previous command.
			 */
			if (prev_erase != erase || erase->size != cmd->size ||
			    region->overlaid) {
				cmd = spi_nor_init_erase_cmd(region, erase);
				if (IS_ERR(cmd)) {
					ret = PTR_ERR(cmd);
					goto destroy_erase_cmd_list;
				}

				list_add_tail(&cmd->list, erase_list);
			} else {
				cmd->count++;
			}

			len -= cmd->size;
			addr += cmd->size;
			prev_erase = erase;
		}
	}

	return 0;

destroy_erase_cmd_list:
	spi_nor_destroy_erase_cmd_list(erase_list);
	return ret;
}

/**
 * spi_nor_erase_multi_sectors() - perform a non-uniform erase
 * @nor:	pointer to a 'struct spi_nor'
 * @addr:	offset in the serial flash memory
 * @len:	number of bytes to erase
 *
 * Build a list of best fitted erase commands and execute it once we validate
 * that the erase can be performed.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_erase_multi_sectors(struct spi_nor *nor, u64 addr, u32 len)
{
	LIST_HEAD(erase_list);
	struct spi_nor_erase_command *cmd, *next;
	int ret;

	ret = spi_nor_init_erase_cmd_list(nor, &erase_list, addr, len);
	if (ret)
		return ret;

	list_for_each_entry_safe(cmd, next, &erase_list, list) {
		nor->erase_opcode = cmd->opcode;
		while (cmd->count) {
			dev_vdbg(nor->dev, "erase_cmd->size = 0x%08x, erase_cmd->opcode = 0x%02x, erase_cmd->count = %u\n",
				 cmd->size, cmd->opcode, cmd->count);

			ret = spi_nor_lock_device(nor);
			if (ret)
				goto destroy_erase_cmd_list;

			ret = spi_nor_write_enable(nor);
			if (ret) {
				spi_nor_unlock_device(nor);
				goto destroy_erase_cmd_list;
			}

			ret = spi_nor_erase_sector(nor, addr);
			spi_nor_unlock_device(nor);
			if (ret)
				goto destroy_erase_cmd_list;

			ret = spi_nor_wait_till_ready(nor);
			if (ret)
				goto destroy_erase_cmd_list;

			addr += cmd->size;
			cmd->count--;
		}
		list_del(&cmd->list);
		kfree(cmd);
	}

	return 0;

destroy_erase_cmd_list:
	spi_nor_destroy_erase_cmd_list(&erase_list);
	return ret;
}

/*
 * Erase @len bytes starting at @addr, one die (of @die_size bytes) at a time.
 * @addr and @len are expected to be die-aligned by the caller.
 */
static int spi_nor_erase_dice(struct spi_nor *nor, loff_t addr,
			      size_t len, size_t die_size)
{
	unsigned long timeout;
	int ret;

	/*
	 * Scale the timeout linearly with the size of the flash, with
	 * a minimum calibrated to an old 2MB flash. We could try to
	 * pull these from CFI/SFDP, but these values should be good
	 * enough for now.
	 */
	timeout = max(CHIP_ERASE_2MB_READY_WAIT_JIFFIES,
		      CHIP_ERASE_2MB_READY_WAIT_JIFFIES *
		      (unsigned long)(nor->mtd.size / SZ_2M));

	do {
		ret = spi_nor_lock_device(nor);
		if (ret)
			return ret;

		ret = spi_nor_write_enable(nor);
		if (ret) {
			spi_nor_unlock_device(nor);
			return ret;
		}

		ret = spi_nor_erase_die(nor, addr, die_size);

		spi_nor_unlock_device(nor);
		if (ret)
			return ret;

		ret = spi_nor_wait_till_ready_with_timeout(nor, timeout);
		if (ret)
			return ret;

		addr += die_size;
		len -= die_size;

	} while (len);

	return 0;
}

/*
 * Erase an address range on the nor chip. The address range may extend
 * one or more erase sectors. Return an error if there is a problem erasing.
 */
static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	u8 n_dice = nor->params->n_dice;
	bool multi_die_erase = false;
	u32 addr, len, rem;
	size_t die_size;
	int ret;

	dev_dbg(nor->dev, "at 0x%llx, len %lld\n", (long long)instr->addr,
		(long long)instr->len);

	/* Uniform-erase parts only accept whole multiples of the erase size. */
	if (spi_nor_has_uniform_erase(nor)) {
		div_u64_rem(instr->len, mtd->erasesize, &rem);
		if (rem)
			return -EINVAL;
	}

	addr = instr->addr;
	len = instr->len;

	/* Die-aligned requests on multi-die parts can use die erase. */
	if (n_dice) {
		die_size = div_u64(mtd->size, n_dice);
		if (!(len & (die_size - 1)) && !(addr & (die_size - 1)))
			multi_die_erase = true;
	} else {
		die_size = mtd->size;
	}

	ret = spi_nor_prep_and_lock_pe(nor, instr->addr, instr->len);
	if (ret)
		return ret;

	/* chip (die) erase?
 */
	if ((len == mtd->size && !(nor->flags & SNOR_F_NO_OP_CHIP_ERASE)) ||
	    multi_die_erase) {
		ret = spi_nor_erase_dice(nor, addr, len, die_size);
		if (ret)
			goto erase_err;

	/* REVISIT in some cases we could speed up erasing large regions
	 * by using SPINOR_OP_SE instead of SPINOR_OP_BE_4K. We may have set up
	 * to use "small sector erase", but that's not always optimal.
	 */

	/* "sector"-at-a-time erase */
	} else if (spi_nor_has_uniform_erase(nor)) {
		while (len) {
			ret = spi_nor_lock_device(nor);
			if (ret)
				goto erase_err;

			ret = spi_nor_write_enable(nor);
			if (ret) {
				spi_nor_unlock_device(nor);
				goto erase_err;
			}

			ret = spi_nor_erase_sector(nor, addr);
			spi_nor_unlock_device(nor);
			if (ret)
				goto erase_err;

			ret = spi_nor_wait_till_ready(nor);
			if (ret)
				goto erase_err;

			addr += mtd->erasesize;
			len -= mtd->erasesize;
		}

	/* erase multiple sectors */
	} else {
		ret = spi_nor_erase_multi_sectors(nor, addr, len);
		if (ret)
			goto erase_err;
	}

	ret = spi_nor_write_disable(nor);

erase_err:
	spi_nor_unlock_and_unprep_pe(nor, instr->addr, instr->len);

	return ret;
}

/**
 * spi_nor_sr1_bit6_quad_enable() - Set the Quad Enable BIT(6) in the Status
 * Register 1.
 * @nor:	pointer to a 'struct spi_nor'
 *
 * Bit 6 of the Status Register 1 is the QE bit for Macronix like QSPI memories.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor)
{
	int ret;

	ret = spi_nor_read_sr(nor, nor->bouncebuf);
	if (ret)
		return ret;

	/* Nothing to do when the QE bit is already set. */
	if (nor->bouncebuf[0] & SR1_QUAD_EN_BIT6)
		return 0;

	nor->bouncebuf[0] |= SR1_QUAD_EN_BIT6;

	return spi_nor_write_sr1_and_check(nor, nor->bouncebuf[0]);
}

/**
 * spi_nor_sr2_bit1_quad_enable() - set the Quad Enable BIT(1) in the Status
 * Register 2.
 * @nor:	pointer to a 'struct spi_nor'.
 *
 * Bit 1 of the Status Register 2 is the QE bit for Spansion like QSPI memories.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor)
{
	int ret;

	/* Some parts cannot read CR; write the 16-bit SR+CR pair blindly. */
	if (nor->flags & SNOR_F_NO_READ_CR)
		return spi_nor_write_16bit_cr_and_check(nor, SR2_QUAD_EN_BIT1);

	ret = spi_nor_read_cr(nor, nor->bouncebuf);
	if (ret)
		return ret;

	if (nor->bouncebuf[0] & SR2_QUAD_EN_BIT1)
		return 0;

	nor->bouncebuf[0] |= SR2_QUAD_EN_BIT1;

	return spi_nor_write_16bit_cr_and_check(nor, nor->bouncebuf[0]);
}

/**
 * spi_nor_sr2_bit7_quad_enable() - set QE bit in Status Register 2.
 * @nor:	pointer to a 'struct spi_nor'
 *
 * Set the Quad Enable (QE) bit in the Status Register 2.
 *
 * This is one of the procedures to set the QE bit described in the SFDP
 * (JESD216 rev B) specification but no manufacturer using this procedure has
 * been identified yet, hence the name of the function.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor)
{
	u8 *sr2 = nor->bouncebuf;
	int ret;
	u8 sr2_written;

	/* Check current Quad Enable bit value. */
	ret = spi_nor_read_sr2(nor, sr2);
	if (ret)
		return ret;
	if (*sr2 & SR2_QUAD_EN_BIT7)
		return 0;

	/* Update the Quad Enable bit.
 */
	*sr2 |= SR2_QUAD_EN_BIT7;

	ret = spi_nor_write_sr2(nor, sr2);
	if (ret)
		return ret;

	sr2_written = *sr2;

	/* Read back and check it. */
	ret = spi_nor_read_sr2(nor, sr2);
	if (ret)
		return ret;

	if (*sr2 != sr2_written) {
		dev_dbg(nor->dev, "SR2: Read back test failed\n");
		return -EIO;
	}

	return 0;
}

/* All manufacturer drivers known to the core, probed in this order. */
static const struct spi_nor_manufacturer *manufacturers[] = {
	&spi_nor_atmel,
	&spi_nor_eon,
	&spi_nor_esmt,
	&spi_nor_everspin,
	&spi_nor_gigadevice,
	&spi_nor_intel,
	&spi_nor_issi,
	&spi_nor_macronix,
	&spi_nor_micron,
	&spi_nor_st,
	&spi_nor_spansion,
	&spi_nor_sst,
	&spi_nor_winbond,
	&spi_nor_xmc,
};

/* Catch-all entry for flashes described only by their SFDP tables. */
static const struct flash_info spi_nor_generic_flash = {
	.name = "spi-nor-generic",
};

/*
 * Find the flash_info entry whose JEDEC ID matches @id, setting
 * nor->manufacturer as a side effect. Returns NULL when nothing matches.
 */
static const struct flash_info *spi_nor_match_id(struct spi_nor *nor,
						 const u8 *id)
{
	const struct flash_info *part;
	unsigned int i, j;

	for (i = 0; i < ARRAY_SIZE(manufacturers); i++) {
		for (j = 0; j < manufacturers[i]->nparts; j++) {
			part = &manufacturers[i]->parts[j];
			if (part->id &&
			    !memcmp(part->id->bytes, id, part->id->len)) {
				nor->manufacturer = manufacturers[i];
				return part;
			}
		}
	}

	return NULL;
}

/* Read the JEDEC ID and resolve it to a flash_info entry. */
static const struct flash_info *spi_nor_detect(struct spi_nor *nor)
{
	const struct flash_info *info;
	u8 *id = nor->bouncebuf;
	int ret;

	ret = spi_nor_read_id(nor, 0, 0, id, nor->reg_proto);
	if (ret) {
		dev_dbg(nor->dev, "error %d reading JEDEC ID\n", ret);
		return ERR_PTR(ret);
	}

	/* Cache the complete flash ID.
 */
	nor->id = devm_kmemdup(nor->dev, id, SPI_NOR_MAX_ID_LEN, GFP_KERNEL);
	if (!nor->id)
		return ERR_PTR(-ENOMEM);

	info = spi_nor_match_id(nor, id);

	/* Fallback to a generic flash described only by its SFDP data. */
	if (!info) {
		ret = spi_nor_check_sfdp_signature(nor);
		if (!ret)
			info = &spi_nor_generic_flash;
	}

	if (!info) {
		dev_err(nor->dev, "unrecognized JEDEC id bytes: %*ph\n",
			SPI_NOR_MAX_ID_LEN, id);
		return ERR_PTR(-ENODEV);
	}
	return info;
}

/*
 * On Octal DTR capable flashes, reads cannot start or end at an odd
 * address in Octal DTR mode. Extra bytes need to be read at the start
 * or end to make sure both the start address and length remain even.
 */
static int spi_nor_octal_dtr_read(struct spi_nor *nor, loff_t from, size_t len,
				  u_char *buf)
{
	u_char *tmp_buf;
	size_t tmp_len;
	loff_t start, end;
	int ret, bytes_read;

	/* Fully even request: read straight into the caller's buffer. */
	if (IS_ALIGNED(from, 2) && IS_ALIGNED(len, 2))
		return spi_nor_read_data(nor, from, len, buf);
	else if (IS_ALIGNED(from, 2) && len > PAGE_SIZE)
		/* Even start, large read: do the even-sized prefix directly. */
		return spi_nor_read_data(nor, from, round_down(len, PAGE_SIZE),
					 buf);

	tmp_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!tmp_buf)
		return -ENOMEM;

	start = round_down(from, 2);
	end = round_up(from + len, 2);

	/*
	 * Avoid allocating too much memory. The requested read length might be
	 * quite large. Allocating a buffer just as large (slightly bigger, in
	 * fact) would put unnecessary memory pressure on the system.
	 *
	 * For example if the read is from 3 to 1M, then this will read from 2
	 * to 4098. The reads from 4098 to 1M will then not need a temporary
	 * buffer so they can proceed as normal.
	 */
	tmp_len = min_t(size_t, end - start, PAGE_SIZE);

	ret = spi_nor_read_data(nor, start, tmp_len, tmp_buf);
	if (ret == 0) {
		ret = -EIO;
		goto out;
	}
	if (ret < 0)
		goto out;

	/*
	 * More bytes are read than actually requested, but that number can't be
	 * reported to the calling function or it will confuse its calculations.
	 * Calculate how many of the _requested_ bytes were read.
	 */
	bytes_read = ret;

	if (from != start)
		ret -= from - start;

	/*
	 * Only account for extra bytes at the end if they were actually read.
	 * For example, if the total length was truncated because of temporary
	 * buffer size limit then the adjustment for the extra bytes at the end
	 * is not needed.
	 */
	if (start + bytes_read == end)
		ret -= end - (from + len);

	memcpy(buf, tmp_buf + (from - start), ret);
out:
	kfree(tmp_buf);
	return ret;
}

/* mtd->_read() implementation: loop until the whole range is read. */
static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
			size_t *retlen, u_char *buf)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	/* Keep the original range for the matching unlock call. */
	loff_t from_lock = from;
	size_t len_lock = len;
	ssize_t ret;

	dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len);

	ret = spi_nor_prep_and_lock_rd(nor, from_lock, len_lock);
	if (ret)
		return ret;

	while (len) {
		loff_t addr = from;

		if (nor->read_proto == SNOR_PROTO_8_8_8_DTR)
			ret = spi_nor_octal_dtr_read(nor, addr, len, buf);
		else
			ret = spi_nor_read_data(nor, addr, len, buf);

		if (ret == 0) {
			/* We shouldn't see 0-length reads */
			ret = -EIO;
			goto read_err;
		}
		if (ret < 0)
			goto read_err;

		WARN_ON(ret > len);
		*retlen += ret;
		buf += ret;
		from += ret;
		len -= ret;
	}
	ret = 0;

read_err:
	spi_nor_unlock_and_unprep_rd(nor, from_lock, len_lock);

	return ret;
}

/*
 * On Octal DTR capable flashes, writes cannot start or end at an odd address
 * in Octal DTR mode. Extra 0xff bytes need to be appended or prepended to
 * make sure the start address and end address are even. 0xff is used because
 * on NOR flashes a program operation can only flip bits from 1 to 0, not the
 * other way round. 0 to 1 flip needs to happen via erases.
 */
static int spi_nor_octal_dtr_write(struct spi_nor *nor, loff_t to, size_t len,
				   const u8 *buf)
{
	u8 *tmp_buf;
	size_t bytes_written;
	loff_t start, end;
	int ret;

	/* Fully even request: program straight from the caller's buffer. */
	if (IS_ALIGNED(to, 2) && IS_ALIGNED(len, 2))
		return spi_nor_write_data(nor, to, len, buf);

	tmp_buf = kmalloc(nor->params->page_size, GFP_KERNEL);
	if (!tmp_buf)
		return -ENOMEM;

	/* Pad with 0xff so the extra bytes leave the flash unchanged. */
	memset(tmp_buf, 0xff, nor->params->page_size);

	start = round_down(to, 2);
	end = round_up(to + len, 2);

	memcpy(tmp_buf + (to - start), buf, len);

	ret = spi_nor_write_data(nor, start, end - start, tmp_buf);
	if (ret == 0) {
		ret = -EIO;
		goto out;
	}
	if (ret < 0)
		goto out;

	/*
	 * More bytes are written than actually requested, but that number can't
	 * be reported to the calling function or it will confuse its
	 * calculations. Calculate how many of the _requested_ bytes were
	 * written.
	 */
	bytes_written = ret;

	if (to != start)
		ret -= to - start;

	/*
	 * Only account for extra bytes at the end if they were actually
	 * written. For example, if for some reason the controller could only
	 * complete a partial write then the adjustment for the extra bytes at
	 * the end is not needed.
	 */
	if (start + bytes_written == end)
		ret -= end - (to + len);

out:
	kfree(tmp_buf);
	return ret;
}

/*
 * Write an address range to the nor chip.  Data must be written in
 * FLASH_PAGESIZE chunks.  The address range may be any size provided
 * it is within the physical boundaries.
 */
static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
			 size_t *retlen, const u_char *buf)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	size_t i;
	ssize_t ret;
	u32 page_size = nor->params->page_size;

	dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);

	ret = spi_nor_prep_and_lock_pe(nor, to, len);
	if (ret)
		return ret;

	for (i = 0; i < len; ) {
		ssize_t written;
		loff_t addr = to + i;
		size_t page_offset = addr & (page_size - 1);
		/* the size of data remaining on the first page */
		size_t page_remain = min_t(size_t, page_size - page_offset, len - i);

		ret = spi_nor_lock_device(nor);
		if (ret)
			goto write_err;

		ret = spi_nor_write_enable(nor);
		if (ret) {
			spi_nor_unlock_device(nor);
			goto write_err;
		}

		if (nor->write_proto == SNOR_PROTO_8_8_8_DTR)
			ret = spi_nor_octal_dtr_write(nor, addr, page_remain,
						      buf + i);
		else
			ret = spi_nor_write_data(nor, addr, page_remain,
						 buf + i);
		spi_nor_unlock_device(nor);
		if (ret < 0)
			goto write_err;
		written = ret;

		ret = spi_nor_wait_till_ready(nor);
		if (ret)
			goto write_err;
		*retlen += written;
		i += written;
	}

write_err:
	spi_nor_unlock_and_unprep_pe(nor, to, len);

	return ret;
}

/* Sanity-check that the caller filled in the mandatory spi_nor fields. */
static int spi_nor_check(struct spi_nor *nor)
{
	if (!nor->dev ||
	    (!nor->spimem && !nor->controller_ops) ||
	    (!nor->spimem && nor->controller_ops &&
	     (!nor->controller_ops->read ||
	      !nor->controller_ops->write ||
	      !nor->controller_ops->read_reg ||
	      !nor->controller_ops->write_reg))) {
		pr_err("spi-nor: please fill all the necessary fields!\n");
		return -EINVAL;
	}

	if (nor->spimem &&
	    nor->controller_ops) {
		dev_err(nor->dev, "nor->spimem and nor->controller_ops are mutually exclusive, please set just one of them.\n");
		return -EINVAL;
	}

	return 0;
}

void
spi_nor_set_read_settings(struct spi_nor_read_command *read,
			  u8 num_mode_clocks,
			  u8 num_wait_states,
			  u8 opcode,
			  enum spi_nor_protocol proto)
{
	read->num_mode_clocks = num_mode_clocks;
	read->num_wait_states = num_wait_states;
	read->opcode = opcode;
	read->proto = proto;
}

void spi_nor_set_pp_settings(struct spi_nor_pp_command *pp, u8 opcode,
			     enum spi_nor_protocol proto)
{
	pp->opcode = opcode;
	pp->proto = proto;
}

/*
 * Map a single hwcaps bit to its command index via @table; -EINVAL when the
 * bit is unknown.
 */
static int spi_nor_hwcaps2cmd(u32 hwcaps, const int table[][2], size_t size)
{
	size_t i;

	for (i = 0; i < size; i++)
		if (table[i][0] == (int)hwcaps)
			return table[i][1];

	return -EINVAL;
}

int spi_nor_hwcaps_read2cmd(u32 hwcaps)
{
	static const int hwcaps_read2cmd[][2] = {
		{ SNOR_HWCAPS_READ,		SNOR_CMD_READ },
		{ SNOR_HWCAPS_READ_FAST,	SNOR_CMD_READ_FAST },
		{ SNOR_HWCAPS_READ_1_1_1_DTR,	SNOR_CMD_READ_1_1_1_DTR },
		{ SNOR_HWCAPS_READ_1_1_2,	SNOR_CMD_READ_1_1_2 },
		{ SNOR_HWCAPS_READ_1_2_2,	SNOR_CMD_READ_1_2_2 },
		{ SNOR_HWCAPS_READ_2_2_2,	SNOR_CMD_READ_2_2_2 },
		{ SNOR_HWCAPS_READ_1_2_2_DTR,	SNOR_CMD_READ_1_2_2_DTR },
		{ SNOR_HWCAPS_READ_1_1_4,	SNOR_CMD_READ_1_1_4 },
		{ SNOR_HWCAPS_READ_1_4_4,	SNOR_CMD_READ_1_4_4 },
		{ SNOR_HWCAPS_READ_4_4_4,	SNOR_CMD_READ_4_4_4 },
		{ SNOR_HWCAPS_READ_1_4_4_DTR,	SNOR_CMD_READ_1_4_4_DTR },
		{ SNOR_HWCAPS_READ_1_1_8,	SNOR_CMD_READ_1_1_8 },
		{ SNOR_HWCAPS_READ_1_8_8,	SNOR_CMD_READ_1_8_8 },
		{ SNOR_HWCAPS_READ_8_8_8,	SNOR_CMD_READ_8_8_8 },
		{ SNOR_HWCAPS_READ_1_8_8_DTR,	SNOR_CMD_READ_1_8_8_DTR },
		{ SNOR_HWCAPS_READ_8_8_8_DTR,	SNOR_CMD_READ_8_8_8_DTR },
	};

	return spi_nor_hwcaps2cmd(hwcaps, hwcaps_read2cmd,
				  ARRAY_SIZE(hwcaps_read2cmd));
}

int spi_nor_hwcaps_pp2cmd(u32 hwcaps)
{
	static const int hwcaps_pp2cmd[][2] = {
		{ SNOR_HWCAPS_PP,		SNOR_CMD_PP },
		{ SNOR_HWCAPS_PP_1_1_4,		SNOR_CMD_PP_1_1_4 },
		{ SNOR_HWCAPS_PP_1_4_4,		SNOR_CMD_PP_1_4_4 },
		{ SNOR_HWCAPS_PP_4_4_4,		SNOR_CMD_PP_4_4_4 },
		{ SNOR_HWCAPS_PP_1_1_8,		SNOR_CMD_PP_1_1_8 },
		{ SNOR_HWCAPS_PP_1_8_8,		SNOR_CMD_PP_1_8_8 },
		{ SNOR_HWCAPS_PP_8_8_8,		SNOR_CMD_PP_8_8_8 },
		{ SNOR_HWCAPS_PP_8_8_8_DTR,	SNOR_CMD_PP_8_8_8_DTR },
	};

	return spi_nor_hwcaps2cmd(hwcaps, hwcaps_pp2cmd,
				  ARRAY_SIZE(hwcaps_pp2cmd));
}

/**
 * spi_nor_spimem_check_op - check if the operation is supported
 *                           by controller
 *@nor:        pointer to a 'struct spi_nor'
 *@op:         pointer to op template to be checked
 *
 * Returns 0 if operation is supported, -EOPNOTSUPP otherwise.
 */
static int spi_nor_spimem_check_op(struct spi_nor *nor,
				   struct spi_mem_op *op)
{
	/*
	 * First test with 4 address bytes. The opcode itself might
	 * be a 3B addressing opcode but we don't care, because
	 * SPI controller implementation should not check the opcode,
	 * but just the sequence.
	 */
	op->addr.nbytes = 4;
	if (!spi_mem_supports_op(nor->spimem, op)) {
		if (nor->params->size > SZ_16M)
			return -EOPNOTSUPP;

		/* If flash size <= 16MB, 3 address bytes are sufficient */
		op->addr.nbytes = 3;
		if (!spi_mem_supports_op(nor->spimem, op))
			return -EOPNOTSUPP;
	}

	return 0;
}

/**
 * spi_nor_spimem_check_readop - check if the read op is supported
 *                               by controller
 *@nor:         pointer to a 'struct spi_nor'
 *@read:        pointer to op template to be checked
 *
 * Returns 0 if operation is supported, -EOPNOTSUPP otherwise.
2385 */ 2386 static int spi_nor_spimem_check_readop(struct spi_nor *nor, 2387 const struct spi_nor_read_command *read) 2388 { 2389 struct spi_mem_op op = SPI_NOR_READ_OP(read->opcode); 2390 2391 spi_nor_spimem_setup_op(nor, &op, read->proto); 2392 2393 /* convert the dummy cycles to the number of bytes */ 2394 op.dummy.nbytes = (read->num_mode_clocks + read->num_wait_states) * 2395 op.dummy.buswidth / 8; 2396 if (spi_nor_protocol_is_dtr(nor->read_proto)) 2397 op.dummy.nbytes *= 2; 2398 2399 return spi_nor_spimem_check_op(nor, &op); 2400 } 2401 2402 /** 2403 * spi_nor_spimem_check_pp - check if the page program op is supported 2404 * by controller 2405 *@nor: pointer to a 'struct spi_nor' 2406 *@pp: pointer to op template to be checked 2407 * 2408 * Returns 0 if operation is supported, -EOPNOTSUPP otherwise. 2409 */ 2410 static int spi_nor_spimem_check_pp(struct spi_nor *nor, 2411 const struct spi_nor_pp_command *pp) 2412 { 2413 struct spi_mem_op op = SPI_NOR_PP_OP(pp->opcode); 2414 2415 spi_nor_spimem_setup_op(nor, &op, pp->proto); 2416 2417 return spi_nor_spimem_check_op(nor, &op); 2418 } 2419 2420 /** 2421 * spi_nor_spimem_adjust_hwcaps - Find optimal Read/Write protocol 2422 * based on SPI controller capabilities 2423 * @nor: pointer to a 'struct spi_nor' 2424 * @hwcaps: pointer to resulting capabilities after adjusting 2425 * according to controller and flash's capability 2426 */ 2427 static void 2428 spi_nor_spimem_adjust_hwcaps(struct spi_nor *nor, u32 *hwcaps) 2429 { 2430 struct spi_nor_flash_parameter *params = nor->params; 2431 unsigned int cap; 2432 2433 /* X-X-X modes are not supported yet, mask them all. */ 2434 *hwcaps &= ~SNOR_HWCAPS_X_X_X; 2435 2436 /* 2437 * If the reset line is broken, we do not want to enter a stateful 2438 * mode. 
2439 */ 2440 if (nor->flags & SNOR_F_BROKEN_RESET) 2441 *hwcaps &= ~(SNOR_HWCAPS_X_X_X | SNOR_HWCAPS_X_X_X_DTR); 2442 2443 for (cap = 0; cap < sizeof(*hwcaps) * BITS_PER_BYTE; cap++) { 2444 int rdidx, ppidx; 2445 2446 if (!(*hwcaps & BIT(cap))) 2447 continue; 2448 2449 rdidx = spi_nor_hwcaps_read2cmd(BIT(cap)); 2450 if (rdidx >= 0 && 2451 spi_nor_spimem_check_readop(nor, ¶ms->reads[rdidx])) 2452 *hwcaps &= ~BIT(cap); 2453 2454 ppidx = spi_nor_hwcaps_pp2cmd(BIT(cap)); 2455 if (ppidx < 0) 2456 continue; 2457 2458 if (spi_nor_spimem_check_pp(nor, 2459 ¶ms->page_programs[ppidx])) 2460 *hwcaps &= ~BIT(cap); 2461 } 2462 2463 /* Some SPI controllers might not support CR read opcode. */ 2464 if (!(nor->flags & SNOR_F_NO_READ_CR)) { 2465 struct spi_mem_op op = SPI_NOR_RDCR_OP(nor->bouncebuf); 2466 2467 spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); 2468 2469 if (spi_nor_spimem_check_op(nor, &op)) 2470 nor->flags |= SNOR_F_NO_READ_CR; 2471 } 2472 } 2473 2474 /** 2475 * spi_nor_set_erase_type() - set a SPI NOR erase type 2476 * @erase: pointer to a structure that describes a SPI NOR erase type 2477 * @size: the size of the sector/block erased by the erase type 2478 * @opcode: the SPI command op code to erase the sector/block 2479 */ 2480 void spi_nor_set_erase_type(struct spi_nor_erase_type *erase, u32 size, 2481 u8 opcode) 2482 { 2483 erase->size = size; 2484 erase->opcode = opcode; 2485 /* JEDEC JESD216B Standard imposes erase sizes to be power of 2. 
*/ 2486 erase->size_shift = ffs(erase->size) - 1; 2487 erase->size_mask = (1 << erase->size_shift) - 1; 2488 } 2489 2490 /** 2491 * spi_nor_mask_erase_type() - mask out a SPI NOR erase type 2492 * @erase: pointer to a structure that describes a SPI NOR erase type 2493 */ 2494 void spi_nor_mask_erase_type(struct spi_nor_erase_type *erase) 2495 { 2496 erase->size = 0; 2497 } 2498 2499 /** 2500 * spi_nor_init_uniform_erase_map() - Initialize uniform erase map 2501 * @map: the erase map of the SPI NOR 2502 * @erase_mask: bitmask encoding erase types that can erase the entire 2503 * flash memory 2504 * @flash_size: the spi nor flash memory size 2505 */ 2506 void spi_nor_init_uniform_erase_map(struct spi_nor_erase_map *map, 2507 u8 erase_mask, u64 flash_size) 2508 { 2509 map->uniform_region.offset = 0; 2510 map->uniform_region.size = flash_size; 2511 map->uniform_region.erase_mask = erase_mask; 2512 map->regions = &map->uniform_region; 2513 map->n_regions = 1; 2514 } 2515 2516 int spi_nor_post_bfpt_fixups(struct spi_nor *nor, 2517 const struct sfdp_parameter_header *bfpt_header, 2518 const struct sfdp_bfpt *bfpt) 2519 { 2520 int ret; 2521 2522 if (nor->manufacturer && nor->manufacturer->fixups && 2523 nor->manufacturer->fixups->post_bfpt) { 2524 ret = nor->manufacturer->fixups->post_bfpt(nor, bfpt_header, 2525 bfpt); 2526 if (ret) 2527 return ret; 2528 } 2529 2530 if (nor->info->fixups && nor->info->fixups->post_bfpt) 2531 return nor->info->fixups->post_bfpt(nor, bfpt_header, bfpt); 2532 2533 return 0; 2534 } 2535 2536 static int spi_nor_select_read(struct spi_nor *nor, 2537 u32 shared_hwcaps) 2538 { 2539 int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_READ_MASK) - 1; 2540 const struct spi_nor_read_command *read; 2541 2542 if (best_match < 0) 2543 return -EINVAL; 2544 2545 cmd = spi_nor_hwcaps_read2cmd(BIT(best_match)); 2546 if (cmd < 0) 2547 return -EINVAL; 2548 2549 read = &nor->params->reads[cmd]; 2550 nor->read_opcode = read->opcode; 2551 nor->read_proto = 
read->proto; 2552 2553 /* 2554 * In the SPI NOR framework, we don't need to make the difference 2555 * between mode clock cycles and wait state clock cycles. 2556 * Indeed, the value of the mode clock cycles is used by a QSPI 2557 * flash memory to know whether it should enter or leave its 0-4-4 2558 * (Continuous Read / XIP) mode. 2559 * eXecution In Place is out of the scope of the mtd sub-system. 2560 * Hence we choose to merge both mode and wait state clock cycles 2561 * into the so called dummy clock cycles. 2562 */ 2563 nor->read_dummy = read->num_mode_clocks + read->num_wait_states; 2564 return 0; 2565 } 2566 2567 static int spi_nor_select_pp(struct spi_nor *nor, 2568 u32 shared_hwcaps) 2569 { 2570 int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_PP_MASK) - 1; 2571 const struct spi_nor_pp_command *pp; 2572 2573 if (best_match < 0) 2574 return -EINVAL; 2575 2576 cmd = spi_nor_hwcaps_pp2cmd(BIT(best_match)); 2577 if (cmd < 0) 2578 return -EINVAL; 2579 2580 pp = &nor->params->page_programs[cmd]; 2581 nor->program_opcode = pp->opcode; 2582 nor->write_proto = pp->proto; 2583 return 0; 2584 } 2585 2586 /** 2587 * spi_nor_select_uniform_erase() - select optimum uniform erase type 2588 * @map: the erase map of the SPI NOR 2589 * 2590 * Once the optimum uniform sector erase command is found, disable all the 2591 * other. 2592 * 2593 * Return: pointer to erase type on success, NULL otherwise. 2594 */ 2595 static const struct spi_nor_erase_type * 2596 spi_nor_select_uniform_erase(struct spi_nor_erase_map *map) 2597 { 2598 const struct spi_nor_erase_type *tested_erase, *erase = NULL; 2599 int i; 2600 u8 uniform_erase_type = map->uniform_region.erase_mask; 2601 2602 /* 2603 * Search for the biggest erase size, except for when compiled 2604 * to use 4k erases. 2605 */ 2606 for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) { 2607 if (!(uniform_erase_type & BIT(i))) 2608 continue; 2609 2610 tested_erase = &map->erase_type[i]; 2611 2612 /* Skip masked erase types. 
*/ 2613 if (!tested_erase->size) 2614 continue; 2615 2616 /* 2617 * If the current erase size is the 4k one, stop here, 2618 * we have found the right uniform Sector Erase command. 2619 */ 2620 if (IS_ENABLED(CONFIG_MTD_SPI_NOR_USE_4K_SECTORS) && 2621 tested_erase->size == SZ_4K) { 2622 erase = tested_erase; 2623 break; 2624 } 2625 2626 /* 2627 * Otherwise, the current erase size is still a valid candidate. 2628 * Select the biggest valid candidate. 2629 */ 2630 if (!erase && tested_erase->size) 2631 erase = tested_erase; 2632 /* keep iterating to find the wanted_size */ 2633 } 2634 2635 if (!erase) 2636 return NULL; 2637 2638 /* Disable all other Sector Erase commands. */ 2639 map->uniform_region.erase_mask = BIT(erase - map->erase_type); 2640 return erase; 2641 } 2642 2643 static int spi_nor_select_erase(struct spi_nor *nor) 2644 { 2645 struct spi_nor_erase_map *map = &nor->params->erase_map; 2646 const struct spi_nor_erase_type *erase = NULL; 2647 struct mtd_info *mtd = &nor->mtd; 2648 int i; 2649 2650 /* 2651 * The previous implementation handling Sector Erase commands assumed 2652 * that the SPI flash memory has an uniform layout then used only one 2653 * of the supported erase sizes for all Sector Erase commands. 2654 * So to be backward compatible, the new implementation also tries to 2655 * manage the SPI flash memory as uniform with a single erase sector 2656 * size, when possible. 2657 */ 2658 if (spi_nor_has_uniform_erase(nor)) { 2659 erase = spi_nor_select_uniform_erase(map); 2660 if (!erase) 2661 return -EINVAL; 2662 nor->erase_opcode = erase->opcode; 2663 mtd->erasesize = erase->size; 2664 return 0; 2665 } 2666 2667 /* 2668 * For non-uniform SPI flash memory, set mtd->erasesize to the 2669 * maximum erase sector size. No need to set nor->erase_opcode. 
2670 */ 2671 for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) { 2672 if (map->erase_type[i].size) { 2673 erase = &map->erase_type[i]; 2674 break; 2675 } 2676 } 2677 2678 if (!erase) 2679 return -EINVAL; 2680 2681 mtd->erasesize = erase->size; 2682 return 0; 2683 } 2684 2685 static int spi_nor_set_addr_nbytes(struct spi_nor *nor) 2686 { 2687 if (nor->params->addr_nbytes) { 2688 nor->addr_nbytes = nor->params->addr_nbytes; 2689 } else if (nor->read_proto == SNOR_PROTO_8_8_8_DTR) { 2690 /* 2691 * In 8D-8D-8D mode, one byte takes half a cycle to transfer. So 2692 * in this protocol an odd addr_nbytes cannot be used because 2693 * then the address phase would only span a cycle and a half. 2694 * Half a cycle would be left over. We would then have to start 2695 * the dummy phase in the middle of a cycle and so too the data 2696 * phase, and we will end the transaction with half a cycle left 2697 * over. 2698 * 2699 * Force all 8D-8D-8D flashes to use an addr_nbytes of 4 to 2700 * avoid this situation. 2701 */ 2702 nor->addr_nbytes = 4; 2703 } else if (nor->info->addr_nbytes) { 2704 nor->addr_nbytes = nor->info->addr_nbytes; 2705 } else { 2706 nor->addr_nbytes = 3; 2707 } 2708 2709 if (nor->addr_nbytes == 3 && nor->params->size > 0x1000000) { 2710 /* enable 4-byte addressing if the device exceeds 16MiB */ 2711 nor->addr_nbytes = 4; 2712 } 2713 2714 if (nor->addr_nbytes > SPI_NOR_MAX_ADDR_NBYTES) { 2715 dev_dbg(nor->dev, "The number of address bytes is too large: %u\n", 2716 nor->addr_nbytes); 2717 return -EINVAL; 2718 } 2719 2720 /* Set 4byte opcodes when possible. 
*/ 2721 if (nor->addr_nbytes == 4 && nor->flags & SNOR_F_4B_OPCODES && 2722 !(nor->flags & SNOR_F_HAS_4BAIT)) 2723 spi_nor_set_4byte_opcodes(nor); 2724 2725 return 0; 2726 } 2727 2728 static int spi_nor_setup(struct spi_nor *nor, 2729 const struct spi_nor_hwcaps *hwcaps) 2730 { 2731 struct spi_nor_flash_parameter *params = nor->params; 2732 u32 ignored_mask, shared_mask; 2733 int err; 2734 2735 /* 2736 * Keep only the hardware capabilities supported by both the SPI 2737 * controller and the SPI flash memory. 2738 */ 2739 shared_mask = hwcaps->mask & params->hwcaps.mask; 2740 2741 if (nor->spimem) { 2742 /* 2743 * When called from spi_nor_probe(), all caps are set and we 2744 * need to discard some of them based on what the SPI 2745 * controller actually supports (using spi_mem_supports_op()). 2746 */ 2747 spi_nor_spimem_adjust_hwcaps(nor, &shared_mask); 2748 } else { 2749 /* 2750 * SPI n-n-n protocols are not supported when the SPI 2751 * controller directly implements the spi_nor interface. 2752 * Yet another reason to switch to spi-mem. 2753 */ 2754 ignored_mask = SNOR_HWCAPS_X_X_X | SNOR_HWCAPS_X_X_X_DTR; 2755 if (shared_mask & ignored_mask) { 2756 dev_dbg(nor->dev, 2757 "SPI n-n-n protocols are not supported.\n"); 2758 shared_mask &= ~ignored_mask; 2759 } 2760 } 2761 2762 /* Select the (Fast) Read command. */ 2763 err = spi_nor_select_read(nor, shared_mask); 2764 if (err) { 2765 dev_dbg(nor->dev, 2766 "can't select read settings supported by both the SPI controller and memory.\n"); 2767 return err; 2768 } 2769 2770 /* Select the Page Program command. */ 2771 err = spi_nor_select_pp(nor, shared_mask); 2772 if (err) { 2773 dev_dbg(nor->dev, 2774 "can't select write settings supported by both the SPI controller and memory.\n"); 2775 return err; 2776 } 2777 2778 /* Select the Sector Erase command. 
*/ 2779 err = spi_nor_select_erase(nor); 2780 if (err) { 2781 dev_dbg(nor->dev, 2782 "can't select erase settings supported by both the SPI controller and memory.\n"); 2783 return err; 2784 } 2785 2786 return spi_nor_set_addr_nbytes(nor); 2787 } 2788 2789 /** 2790 * spi_nor_manufacturer_init_params() - Initialize the flash's parameters and 2791 * settings based on MFR register and ->default_init() hook. 2792 * @nor: pointer to a 'struct spi_nor'. 2793 */ 2794 static void spi_nor_manufacturer_init_params(struct spi_nor *nor) 2795 { 2796 if (nor->manufacturer && nor->manufacturer->fixups && 2797 nor->manufacturer->fixups->default_init) 2798 nor->manufacturer->fixups->default_init(nor); 2799 2800 if (nor->info->fixups && nor->info->fixups->default_init) 2801 nor->info->fixups->default_init(nor); 2802 } 2803 2804 /** 2805 * spi_nor_no_sfdp_init_params() - Initialize the flash's parameters and 2806 * settings based on nor->info->sfdp_flags. This method should be called only by 2807 * flashes that do not define SFDP tables. If the flash supports SFDP but the 2808 * information is wrong and the settings from this function can not be retrieved 2809 * by parsing SFDP, one should instead use the fixup hooks and update the wrong 2810 * bits. 2811 * @nor: pointer to a 'struct spi_nor'. 
2812 */ 2813 static void spi_nor_no_sfdp_init_params(struct spi_nor *nor) 2814 { 2815 struct spi_nor_flash_parameter *params = nor->params; 2816 struct spi_nor_erase_map *map = ¶ms->erase_map; 2817 const struct flash_info *info = nor->info; 2818 const u8 no_sfdp_flags = info->no_sfdp_flags; 2819 u8 i, erase_mask; 2820 2821 if (no_sfdp_flags & SPI_NOR_DUAL_READ) { 2822 params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2; 2823 spi_nor_set_read_settings(¶ms->reads[SNOR_CMD_READ_1_1_2], 2824 0, 8, SPINOR_OP_READ_1_1_2, 2825 SNOR_PROTO_1_1_2); 2826 } 2827 2828 if (no_sfdp_flags & SPI_NOR_QUAD_READ) { 2829 params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4; 2830 spi_nor_set_read_settings(¶ms->reads[SNOR_CMD_READ_1_1_4], 2831 0, 8, SPINOR_OP_READ_1_1_4, 2832 SNOR_PROTO_1_1_4); 2833 } 2834 2835 if (no_sfdp_flags & SPI_NOR_OCTAL_READ) { 2836 params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_8; 2837 spi_nor_set_read_settings(¶ms->reads[SNOR_CMD_READ_1_1_8], 2838 0, 8, SPINOR_OP_READ_1_1_8, 2839 SNOR_PROTO_1_1_8); 2840 } 2841 2842 if (no_sfdp_flags & SPI_NOR_OCTAL_DTR_READ) { 2843 params->hwcaps.mask |= SNOR_HWCAPS_READ_8_8_8_DTR; 2844 spi_nor_set_read_settings(¶ms->reads[SNOR_CMD_READ_8_8_8_DTR], 2845 0, 20, SPINOR_OP_READ_FAST, 2846 SNOR_PROTO_8_8_8_DTR); 2847 } 2848 2849 if (no_sfdp_flags & SPI_NOR_OCTAL_DTR_PP) { 2850 params->hwcaps.mask |= SNOR_HWCAPS_PP_8_8_8_DTR; 2851 /* 2852 * Since xSPI Page Program opcode is backward compatible with 2853 * Legacy SPI, use Legacy SPI opcode there as well. 2854 */ 2855 spi_nor_set_pp_settings(¶ms->page_programs[SNOR_CMD_PP_8_8_8_DTR], 2856 SPINOR_OP_PP, SNOR_PROTO_8_8_8_DTR); 2857 } 2858 2859 /* 2860 * Sector Erase settings. Sort Erase Types in ascending order, with the 2861 * smallest erase size starting at BIT(0). 
2862 */ 2863 erase_mask = 0; 2864 i = 0; 2865 if (no_sfdp_flags & SECT_4K) { 2866 erase_mask |= BIT(i); 2867 spi_nor_set_erase_type(&map->erase_type[i], 4096u, 2868 SPINOR_OP_BE_4K); 2869 i++; 2870 } 2871 erase_mask |= BIT(i); 2872 spi_nor_set_erase_type(&map->erase_type[i], 2873 info->sector_size ?: SPI_NOR_DEFAULT_SECTOR_SIZE, 2874 SPINOR_OP_SE); 2875 spi_nor_init_uniform_erase_map(map, erase_mask, params->size); 2876 } 2877 2878 /** 2879 * spi_nor_init_flags() - Initialize NOR flags for settings that are not defined 2880 * in the JESD216 SFDP standard, thus can not be retrieved when parsing SFDP. 2881 * @nor: pointer to a 'struct spi_nor' 2882 */ 2883 static void spi_nor_init_flags(struct spi_nor *nor) 2884 { 2885 struct device_node *np = spi_nor_get_flash_node(nor); 2886 const u16 flags = nor->info->flags; 2887 2888 if (of_property_read_bool(np, "broken-flash-reset")) 2889 nor->flags |= SNOR_F_BROKEN_RESET; 2890 2891 if (of_property_read_bool(np, "no-wp")) 2892 nor->flags |= SNOR_F_NO_WP; 2893 2894 if (flags & SPI_NOR_SWP_IS_VOLATILE) 2895 nor->flags |= SNOR_F_SWP_IS_VOLATILE; 2896 2897 if (flags & SPI_NOR_HAS_LOCK) 2898 nor->flags |= SNOR_F_HAS_LOCK; 2899 2900 if (flags & SPI_NOR_HAS_TB) { 2901 nor->flags |= SNOR_F_HAS_SR_TB; 2902 if (flags & SPI_NOR_TB_SR_BIT6) 2903 nor->flags |= SNOR_F_HAS_SR_TB_BIT6; 2904 } 2905 2906 if (flags & SPI_NOR_4BIT_BP) { 2907 nor->flags |= SNOR_F_HAS_4BIT_BP; 2908 if (flags & SPI_NOR_BP3_SR_BIT6) 2909 nor->flags |= SNOR_F_HAS_SR_BP3_BIT6; 2910 } 2911 2912 if (flags & SPI_NOR_RWW && nor->params->n_banks > 1 && 2913 !nor->controller_ops) 2914 nor->flags |= SNOR_F_RWW; 2915 } 2916 2917 /** 2918 * spi_nor_init_fixup_flags() - Initialize NOR flags for settings that can not 2919 * be discovered by SFDP for this particular flash because the SFDP table that 2920 * indicates this support is not defined in the flash. 
In case the table for 2921 * this support is defined but has wrong values, one should instead use a 2922 * post_sfdp() hook to set the SNOR_F equivalent flag. 2923 * @nor: pointer to a 'struct spi_nor' 2924 */ 2925 static void spi_nor_init_fixup_flags(struct spi_nor *nor) 2926 { 2927 const u8 fixup_flags = nor->info->fixup_flags; 2928 2929 if (fixup_flags & SPI_NOR_4B_OPCODES) 2930 nor->flags |= SNOR_F_4B_OPCODES; 2931 2932 if (fixup_flags & SPI_NOR_IO_MODE_EN_VOLATILE) 2933 nor->flags |= SNOR_F_IO_MODE_EN_VOLATILE; 2934 } 2935 2936 /** 2937 * spi_nor_late_init_params() - Late initialization of default flash parameters. 2938 * @nor: pointer to a 'struct spi_nor' 2939 * 2940 * Used to initialize flash parameters that are not declared in the JESD216 2941 * SFDP standard, or where SFDP tables are not defined at all. 2942 * Will replace the spi_nor_manufacturer_init_params() method. 2943 */ 2944 static int spi_nor_late_init_params(struct spi_nor *nor) 2945 { 2946 struct spi_nor_flash_parameter *params = nor->params; 2947 int ret; 2948 2949 if (nor->manufacturer && nor->manufacturer->fixups && 2950 nor->manufacturer->fixups->late_init) { 2951 ret = nor->manufacturer->fixups->late_init(nor); 2952 if (ret) 2953 return ret; 2954 } 2955 2956 /* Needed by some flashes late_init hooks. */ 2957 spi_nor_init_flags(nor); 2958 2959 if (nor->info->fixups && nor->info->fixups->late_init) { 2960 ret = nor->info->fixups->late_init(nor); 2961 if (ret) 2962 return ret; 2963 } 2964 2965 if (!nor->params->die_erase_opcode) 2966 nor->params->die_erase_opcode = SPINOR_OP_CHIP_ERASE; 2967 2968 /* Default method kept for backward compatibility. */ 2969 if (!params->set_4byte_addr_mode) 2970 params->set_4byte_addr_mode = spi_nor_set_4byte_addr_mode_brwr; 2971 2972 spi_nor_init_fixup_flags(nor); 2973 2974 /* 2975 * NOR protection support. When locking_ops are not provided, we pick 2976 * the default ones. 
2977 */ 2978 if (nor->flags & SNOR_F_HAS_LOCK && !nor->params->locking_ops) 2979 spi_nor_init_default_locking_ops(nor); 2980 2981 if (params->n_banks > 1) 2982 params->bank_size = div_u64(params->size, params->n_banks); 2983 2984 return 0; 2985 } 2986 2987 /** 2988 * spi_nor_sfdp_init_params_deprecated() - Deprecated way of initializing flash 2989 * parameters and settings based on JESD216 SFDP standard. 2990 * @nor: pointer to a 'struct spi_nor'. 2991 * 2992 * The method has a roll-back mechanism: in case the SFDP parsing fails, the 2993 * legacy flash parameters and settings will be restored. 2994 */ 2995 static void spi_nor_sfdp_init_params_deprecated(struct spi_nor *nor) 2996 { 2997 struct spi_nor_flash_parameter sfdp_params; 2998 2999 memcpy(&sfdp_params, nor->params, sizeof(sfdp_params)); 3000 3001 if (spi_nor_parse_sfdp(nor)) { 3002 memcpy(nor->params, &sfdp_params, sizeof(*nor->params)); 3003 nor->flags &= ~SNOR_F_4B_OPCODES; 3004 } 3005 } 3006 3007 /** 3008 * spi_nor_init_params_deprecated() - Deprecated way of initializing flash 3009 * parameters and settings. 3010 * @nor: pointer to a 'struct spi_nor'. 3011 * 3012 * The method assumes that flash doesn't support SFDP so it initializes flash 3013 * parameters in spi_nor_no_sfdp_init_params() which later on can be overwritten 3014 * when parsing SFDP, if supported. 3015 */ 3016 static void spi_nor_init_params_deprecated(struct spi_nor *nor) 3017 { 3018 spi_nor_no_sfdp_init_params(nor); 3019 3020 spi_nor_manufacturer_init_params(nor); 3021 3022 if (nor->info->no_sfdp_flags & (SPI_NOR_DUAL_READ | 3023 SPI_NOR_QUAD_READ | 3024 SPI_NOR_OCTAL_READ | 3025 SPI_NOR_OCTAL_DTR_READ)) 3026 spi_nor_sfdp_init_params_deprecated(nor); 3027 } 3028 3029 /** 3030 * spi_nor_init_default_params() - Default initialization of flash parameters 3031 * and settings. Done for all flashes, regardless is they define SFDP tables 3032 * or not. 3033 * @nor: pointer to a 'struct spi_nor'. 
3034 */ 3035 static void spi_nor_init_default_params(struct spi_nor *nor) 3036 { 3037 struct spi_nor_flash_parameter *params = nor->params; 3038 const struct flash_info *info = nor->info; 3039 struct device_node *np = spi_nor_get_flash_node(nor); 3040 3041 params->quad_enable = spi_nor_sr2_bit1_quad_enable; 3042 params->otp.org = info->otp; 3043 3044 /* Default to 16-bit Write Status (01h) Command */ 3045 nor->flags |= SNOR_F_HAS_16BIT_SR; 3046 3047 /* Set SPI NOR sizes. */ 3048 params->writesize = 1; 3049 params->size = info->size; 3050 params->bank_size = params->size; 3051 params->page_size = info->page_size ?: SPI_NOR_DEFAULT_PAGE_SIZE; 3052 params->n_banks = info->n_banks ?: SPI_NOR_DEFAULT_N_BANKS; 3053 3054 /* Default to Fast Read for non-DT and enable it if requested by DT. */ 3055 if (!np || of_property_read_bool(np, "m25p,fast-read")) 3056 params->hwcaps.mask |= SNOR_HWCAPS_READ_FAST; 3057 3058 /* (Fast) Read settings. */ 3059 params->hwcaps.mask |= SNOR_HWCAPS_READ; 3060 spi_nor_set_read_settings(¶ms->reads[SNOR_CMD_READ], 3061 0, 0, SPINOR_OP_READ, 3062 SNOR_PROTO_1_1_1); 3063 3064 if (params->hwcaps.mask & SNOR_HWCAPS_READ_FAST) 3065 spi_nor_set_read_settings(¶ms->reads[SNOR_CMD_READ_FAST], 3066 0, 8, SPINOR_OP_READ_FAST, 3067 SNOR_PROTO_1_1_1); 3068 /* Page Program settings. */ 3069 params->hwcaps.mask |= SNOR_HWCAPS_PP; 3070 spi_nor_set_pp_settings(¶ms->page_programs[SNOR_CMD_PP], 3071 SPINOR_OP_PP, SNOR_PROTO_1_1_1); 3072 3073 if (info->flags & SPI_NOR_QUAD_PP) { 3074 params->hwcaps.mask |= SNOR_HWCAPS_PP_1_1_4; 3075 spi_nor_set_pp_settings(¶ms->page_programs[SNOR_CMD_PP_1_1_4], 3076 SPINOR_OP_PP_1_1_4, SNOR_PROTO_1_1_4); 3077 } 3078 } 3079 3080 /** 3081 * spi_nor_init_params() - Initialize the flash's parameters and settings. 3082 * @nor: pointer to a 'struct spi_nor'. 
3083 * 3084 * The flash parameters and settings are initialized based on a sequence of 3085 * calls that are ordered by priority: 3086 * 3087 * 1/ Default flash parameters initialization. The initializations are done 3088 * based on nor->info data: 3089 * spi_nor_info_init_params() 3090 * 3091 * which can be overwritten by: 3092 * 2/ Manufacturer flash parameters initialization. The initializations are 3093 * done based on MFR register, or when the decisions can not be done solely 3094 * based on MFR, by using specific flash_info tweeks, ->default_init(): 3095 * spi_nor_manufacturer_init_params() 3096 * 3097 * which can be overwritten by: 3098 * 3/ SFDP flash parameters initialization. JESD216 SFDP is a standard and 3099 * should be more accurate that the above. 3100 * spi_nor_parse_sfdp() or spi_nor_no_sfdp_init_params() 3101 * 3102 * Please note that there is a ->post_bfpt() fixup hook that can overwrite 3103 * the flash parameters and settings immediately after parsing the Basic 3104 * Flash Parameter Table. 3105 * spi_nor_post_sfdp_fixups() is called after the SFDP tables are parsed. 3106 * It is used to tweak various flash parameters when information provided 3107 * by the SFDP tables are wrong. 3108 * 3109 * which can be overwritten by: 3110 * 4/ Late flash parameters initialization, used to initialize flash 3111 * parameters that are not declared in the JESD216 SFDP standard, or where SFDP 3112 * tables are not defined at all. 3113 * spi_nor_late_init_params() 3114 * 3115 * Return: 0 on success, -errno otherwise. 3116 */ 3117 static int spi_nor_init_params(struct spi_nor *nor) 3118 { 3119 int ret; 3120 3121 nor->params = devm_kzalloc(nor->dev, sizeof(*nor->params), GFP_KERNEL); 3122 if (!nor->params) 3123 return -ENOMEM; 3124 3125 spi_nor_init_default_params(nor); 3126 3127 if (spi_nor_needs_sfdp(nor)) { 3128 ret = spi_nor_parse_sfdp(nor); 3129 if (ret) { 3130 dev_err(nor->dev, "BFPT parsing failed. 
Please consider using SPI_NOR_SKIP_SFDP when declaring the flash\n"); 3131 return ret; 3132 } 3133 } else if (nor->info->no_sfdp_flags & SPI_NOR_SKIP_SFDP) { 3134 spi_nor_no_sfdp_init_params(nor); 3135 } else { 3136 spi_nor_init_params_deprecated(nor); 3137 } 3138 3139 ret = spi_nor_late_init_params(nor); 3140 if (ret) 3141 return ret; 3142 3143 if (WARN_ON(!is_power_of_2(nor->params->page_size))) 3144 return -EINVAL; 3145 3146 return 0; 3147 } 3148 3149 /** spi_nor_set_octal_dtr() - enable or disable Octal DTR I/O. 3150 * @nor: pointer to a 'struct spi_nor' 3151 * @enable: whether to enable or disable Octal DTR 3152 * 3153 * Return: 0 on success, -errno otherwise. 3154 */ 3155 static int spi_nor_set_octal_dtr(struct spi_nor *nor, bool enable) 3156 { 3157 int ret; 3158 3159 if (!nor->params->set_octal_dtr) 3160 return 0; 3161 3162 if (!(nor->read_proto == SNOR_PROTO_8_8_8_DTR && 3163 nor->write_proto == SNOR_PROTO_8_8_8_DTR)) 3164 return 0; 3165 3166 if (!(nor->flags & SNOR_F_IO_MODE_EN_VOLATILE)) 3167 return 0; 3168 3169 ret = nor->params->set_octal_dtr(nor, enable); 3170 if (ret) 3171 return ret; 3172 3173 if (enable) 3174 nor->reg_proto = SNOR_PROTO_8_8_8_DTR; 3175 else 3176 nor->reg_proto = SNOR_PROTO_1_1_1; 3177 3178 return 0; 3179 } 3180 3181 /** 3182 * spi_nor_quad_enable() - enable Quad I/O if needed. 3183 * @nor: pointer to a 'struct spi_nor' 3184 * 3185 * Return: 0 on success, -errno otherwise. 3186 */ 3187 static int spi_nor_quad_enable(struct spi_nor *nor) 3188 { 3189 if (!nor->params->quad_enable) 3190 return 0; 3191 3192 if (!(spi_nor_get_protocol_width(nor->read_proto) == 4 || 3193 spi_nor_get_protocol_width(nor->write_proto) == 4)) 3194 return 0; 3195 3196 return nor->params->quad_enable(nor); 3197 } 3198 3199 /** 3200 * spi_nor_set_4byte_addr_mode() - Set address mode. 3201 * @nor: pointer to a 'struct spi_nor'. 3202 * @enable: enable/disable 4 byte address mode. 3203 * 3204 * Return: 0 on success, -errno otherwise. 
 */
int spi_nor_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
{
	struct spi_nor_flash_parameter *params = nor->params;
	int ret;

	if (enable) {
		/*
		 * If the RESET# pin isn't hooked up properly, or the system
		 * otherwise doesn't perform a reset command in the boot
		 * sequence, it's impossible to 100% protect against unexpected
		 * reboots (e.g., crashes). Warn the user (or hopefully, system
		 * designer) that this is bad.
		 */
		WARN_ONCE(nor->flags & SNOR_F_BROKEN_RESET,
			  "enabling reset hack; may not recover from unexpected reboots\n");
	}

	ret = params->set_4byte_addr_mode(nor, enable);
	if (ret && ret != -EOPNOTSUPP)
		return ret;

	/*
	 * -EOPNOTSUPP is tolerated above so that the software view of the
	 * address mode below is still updated for flashes whose
	 * set_4byte_addr_mode() hook reports the operation as unsupported.
	 */
	if (enable) {
		params->addr_nbytes = 4;
		params->addr_mode_nbytes = 4;
	} else {
		params->addr_nbytes = 3;
		params->addr_mode_nbytes = 3;
	}

	return 0;
}

/*
 * Send all the required SPI flash commands to initialize the device:
 * enter octal DTR mode if available, set the quad enable bit if needed,
 * optionally unlock the whole array, and enter 4-byte address mode when
 * 4-byte addressing is used without dedicated 4-byte opcodes.
 */
static int spi_nor_init(struct spi_nor *nor)
{
	int err;

	err = spi_nor_set_octal_dtr(nor, true);
	if (err) {
		dev_dbg(nor->dev, "octal mode not supported\n");
		return err;
	}

	err = spi_nor_quad_enable(nor);
	if (err) {
		dev_dbg(nor->dev, "quad mode not supported\n");
		return err;
	}

	/*
	 * Some SPI NOR flashes are write protected by default after a power-on
	 * reset cycle, in order to avoid inadvertent writes during power-up.
	 * Backward compatibility imposes to unlock the entire flash memory
	 * array at power-up by default. Depending on the kernel configuration
	 * (1) do nothing, (2) always unlock the entire flash array or (3)
	 * unlock the entire flash array only when the software write
	 * protection bits are volatile. The latter is indicated by
	 * SNOR_F_SWP_IS_VOLATILE.
	 */
	if (IS_ENABLED(CONFIG_MTD_SPI_NOR_SWP_DISABLE) ||
	    (IS_ENABLED(CONFIG_MTD_SPI_NOR_SWP_DISABLE_ON_VOLATILE) &&
	     nor->flags & SNOR_F_SWP_IS_VOLATILE))
		spi_nor_try_unlock_all(nor);

	if (nor->addr_nbytes == 4 &&
	    nor->read_proto != SNOR_PROTO_8_8_8_DTR &&
	    !(nor->flags & SNOR_F_4B_OPCODES))
		return spi_nor_set_4byte_addr_mode(nor, true);

	return 0;
}

/**
 * spi_nor_soft_reset() - Perform a software reset
 * @nor:	pointer to 'struct spi_nor'
 *
 * Performs a "Soft Reset and Enter Default Protocol Mode" sequence which resets
 * the device to its power-on-reset state. This is useful when the software has
 * made some changes to device (volatile) registers and needs to reset it before
 * shutting down, for example.
 *
 * Not every flash supports this sequence. The same set of opcodes might be used
 * for some other operation on a flash that does not support this. Support for
 * this sequence can be discovered via SFDP in the BFPT table.
 *
 * Failures are not propagated to the caller: an unsupported reset-enable
 * opcode (-EOPNOTSUPP) is ignored silently, any other error is only reported
 * with dev_warn().
 */
static void spi_nor_soft_reset(struct spi_nor *nor)
{
	struct spi_mem_op op;
	int ret;

	op = (struct spi_mem_op)SPINOR_SRSTEN_OP;

	spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

	ret = spi_mem_exec_op(nor->spimem, &op);
	if (ret) {
		if (ret != -EOPNOTSUPP)
			dev_warn(nor->dev, "Software reset failed: %d\n", ret);
		return;
	}

	op = (struct spi_mem_op)SPINOR_SRST_OP;

	spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

	ret = spi_mem_exec_op(nor->spimem, &op);
	if (ret) {
		dev_warn(nor->dev, "Software reset failed: %d\n", ret);
		return;
	}

	/*
	 * Software Reset is not instant, and the delay varies from flash to
	 * flash. Looking at a few flashes, most range somewhere below 100
	 * microseconds. So, sleep for a range of 200-400 us.
	 */
	usleep_range(SPI_NOR_SRST_SLEEP_MIN, SPI_NOR_SRST_SLEEP_MAX);
}

/* mtd suspend handler */
static int spi_nor_suspend(struct mtd_info *mtd)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	int ret;

	/* Disable octal DTR mode if we enabled it. */
	ret = spi_nor_set_octal_dtr(nor, false);
	if (ret)
		dev_err(nor->dev, "suspend() failed\n");

	return ret;
}

/* mtd resume handler */
static void spi_nor_resume(struct mtd_info *mtd)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	struct device *dev = nor->dev;
	int ret;

	/* re-initialize the nor chip */
	ret = spi_nor_init(nor);
	if (ret)
		dev_err(dev, "resume() failed\n");
}

/*
 * Pin the module that owns the underlying device (the SPI controller's
 * parent when going through spi-mem, the flash device otherwise) while
 * the MTD device is in use.
 */
static int spi_nor_get_device(struct mtd_info *mtd)
{
	struct mtd_info *master = mtd_get_master(mtd);
	struct spi_nor *nor = mtd_to_spi_nor(master);
	struct device *dev;

	if (nor->spimem)
		dev = nor->spimem->spi->controller->dev.parent;
	else
		dev = nor->dev;

	if (!try_module_get(dev->driver->owner))
		return -ENODEV;

	return 0;
}

/* Drop the module reference taken by spi_nor_get_device(). */
static void spi_nor_put_device(struct mtd_info *mtd)
{
	struct mtd_info *master = mtd_get_master(mtd);
	struct spi_nor *nor = mtd_to_spi_nor(master);
	struct device *dev;

	if (nor->spimem)
		dev = nor->spimem->spi->controller->dev.parent;
	else
		dev = nor->dev;

	module_put(dev->driver->owner);
}

/*
 * Put the flash back into a state the next boot expects: leave 4-byte
 * address mode if the reset line is known broken, and perform a software
 * reset when the flash is flagged for it.
 */
static void spi_nor_restore(struct spi_nor *nor)
{
	int ret;

	/* restore the addressing mode */
	if (nor->addr_nbytes == 4 && !(nor->flags & SNOR_F_4B_OPCODES) &&
	    nor->flags & SNOR_F_BROKEN_RESET) {
		ret = spi_nor_set_4byte_addr_mode(nor, false);
		if (ret)
			/*
			 * Do not stop the execution in the hope that the flash
			 * will default to the 3-byte address mode after the
			 * software reset.
			 */
			dev_err(nor->dev, "Failed to exit 4-byte address mode, err = %d\n", ret);
	}

	if (nor->flags & SNOR_F_SOFT_RESET)
		spi_nor_soft_reset(nor);
}

/*
 * Look up a flash_info entry by part name across all manufacturers and
 * record the matching manufacturer in @nor. Returns NULL if no part of
 * that name is known.
 */
static const struct flash_info *spi_nor_match_name(struct spi_nor *nor,
						   const char *name)
{
	unsigned int i, j;

	for (i = 0; i < ARRAY_SIZE(manufacturers); i++) {
		for (j = 0; j < manufacturers[i]->nparts; j++) {
			if (manufacturers[i]->parts[j].name &&
			    !strcmp(name, manufacturers[i]->parts[j].name)) {
				nor->manufacturer = manufacturers[i];
				return &manufacturers[i]->parts[j];
			}
		}
	}

	return NULL;
}

static const struct flash_info *spi_nor_get_flash_info(struct spi_nor *nor,
						       const char *name)
{
	const struct flash_info *info = NULL;

	if (name)
		info = spi_nor_match_name(nor, name);
	/*
	 * Auto-detect if chip name wasn't specified or not found, or the chip
	 * has an ID. If the chip supposedly has an ID, we also do an
	 * auto-detection to compare it later.
	 */
	if (!info || info->id) {
		const struct flash_info *jinfo;

		jinfo = spi_nor_detect(nor);
		if (IS_ERR(jinfo))
			return jinfo;

		/*
		 * If caller has specified name of flash model that can normally
		 * be detected using JEDEC, let's verify it.
		 */
		if (info && jinfo != info)
			dev_warn(nor->dev, "found %s, expected %s\n",
				 jinfo->name, info->name);

		/* If info was set before, JEDEC knows better. */
		info = jinfo;
	}

	return info;
}

/*
 * Return the erase size to advertise for @region: the full region size for
 * overlaid regions, otherwise the size of the largest erase type the region
 * supports, or 0 if the region supports no erase type at all.
 */
static u32
spi_nor_get_region_erasesize(const struct spi_nor_erase_region *region,
			     const struct spi_nor_erase_type *erase_type)
{
	int i;

	if (region->overlaid)
		return region->size;

	for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
		if (region->erase_mask & BIT(i))
			return erase_type[i].size;
	}

	return 0;
}

/* Describe a non-uniform erase map to MTD as an array of erase regions. */
static int spi_nor_set_mtd_eraseregions(struct spi_nor *nor)
{
	const struct spi_nor_erase_map *map = &nor->params->erase_map;
	const struct spi_nor_erase_region *region = map->regions;
	struct mtd_erase_region_info *mtd_region;
	struct mtd_info *mtd = &nor->mtd;
	u32 erasesize, i;

	mtd_region = devm_kcalloc(nor->dev, map->n_regions, sizeof(*mtd_region),
				  GFP_KERNEL);
	if (!mtd_region)
		return -ENOMEM;

	for (i = 0; i < map->n_regions; i++) {
		erasesize = spi_nor_get_region_erasesize(&region[i],
							 map->erase_type);
		if (!erasesize)
			return -EINVAL;

		mtd_region[i].erasesize = erasesize;
		mtd_region[i].numblocks = div_u64(region[i].size, erasesize);
		mtd_region[i].offset = region[i].offset;
	}

	mtd->numeraseregions = map->n_regions;
	mtd->eraseregions = mtd_region;

	return 0;
}

/* Fill in the mtd_info fields and hook up the MTD operations. */
static int spi_nor_set_mtd_info(struct spi_nor *nor)
{
	struct mtd_info *mtd = &nor->mtd;
	struct device *dev = nor->dev;

	spi_nor_set_mtd_locking_ops(nor);
	spi_nor_set_mtd_otp_ops(nor);

	mtd->dev.parent = dev;
	if (!mtd->name)
		mtd->name = dev_name(dev);
	mtd->type = MTD_NORFLASH;
	mtd->flags = MTD_CAP_NORFLASH;
	/* Unset BIT_WRITEABLE to enable JFFS2 write buffer for ECC'd NOR */
	if (nor->flags & SNOR_F_ECC)
		mtd->flags &= ~MTD_BIT_WRITEABLE;
	if (nor->info->flags & SPI_NOR_NO_ERASE)
		mtd->flags |= MTD_NO_ERASE;
	else
		mtd->_erase = spi_nor_erase;
	mtd->writesize = nor->params->writesize;
	mtd->writebufsize = nor->params->page_size;
	mtd->size = nor->params->size;
	mtd->_read = spi_nor_read;
	/* Might be already set by some SST flashes. */
	if (!mtd->_write)
		mtd->_write = spi_nor_write;
	mtd->_suspend = spi_nor_suspend;
	mtd->_resume = spi_nor_resume;
	mtd->_get_device = spi_nor_get_device;
	mtd->_put_device = spi_nor_put_device;

	if (!spi_nor_has_uniform_erase(nor))
		return spi_nor_set_mtd_eraseregions(nor);

	return 0;
}

/*
 * Pulse the optional "reset" GPIO to hardware-reset the flash. Returns 0
 * when no reset GPIO is described, -errno if the GPIO lookup fails.
 */
static int spi_nor_hw_reset(struct spi_nor *nor)
{
	struct gpio_desc *reset;

	reset = devm_gpiod_get_optional(nor->dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR_OR_NULL(reset))
		return PTR_ERR_OR_ZERO(reset);

	/*
	 * Experimental delay values by looking at different flash device
	 * vendors datasheets.
	 */
	usleep_range(1, 5);
	gpiod_set_value_cansleep(reset, 1);
	usleep_range(100, 150);
	gpiod_set_value_cansleep(reset, 0);
	usleep_range(1000, 1200);

	return 0;
}

/**
 * spi_nor_scan() - Detect and initialize a SPI NOR flash
 * @nor:	pointer to a partially initialized 'struct spi_nor'
 * @name:	flash part name, or NULL to rely on JEDEC auto-detection
 * @hwcaps:	hardware capabilities supported by the controller driver
 *
 * Identifies the flash, initializes its parameters (flash_info and SFDP),
 * selects the opcodes/protocols/address width, sends the device init
 * sequence and fills in the mtd_info.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_scan(struct spi_nor *nor, const char *name,
		 const struct spi_nor_hwcaps *hwcaps)
{
	const struct flash_info *info;
	struct device *dev = nor->dev;
	int ret;

	ret = spi_nor_check(nor);
	if (ret)
		return ret;

	/* Reset SPI protocol for all commands. */
	nor->reg_proto = SNOR_PROTO_1_1_1;
	nor->read_proto = SNOR_PROTO_1_1_1;
	nor->write_proto = SNOR_PROTO_1_1_1;

	/*
	 * We need the bounce buffer early to read/write registers when going
	 * through the spi-mem layer (buffers have to be DMA-able).
	 * For spi-mem drivers, we'll reallocate a new buffer if
	 * nor->params->page_size turns out to be greater than PAGE_SIZE (which
	 * shouldn't happen before long since NOR pages are usually less
	 * than 1KB) after spi_nor_scan() returns.
	 */
	nor->bouncebuf_size = PAGE_SIZE;
	nor->bouncebuf = devm_kmalloc(dev, nor->bouncebuf_size,
				      GFP_KERNEL);
	if (!nor->bouncebuf)
		return -ENOMEM;

	ret = spi_nor_hw_reset(nor);
	if (ret)
		return ret;

	info = spi_nor_get_flash_info(nor, name);
	if (IS_ERR(info))
		return PTR_ERR(info);

	nor->info = info;

	mutex_init(&nor->lock);

	/* Init flash parameters based on flash_info struct and SFDP */
	ret = spi_nor_init_params(nor);
	if (ret)
		return ret;

	if (spi_nor_use_parallel_locking(nor))
		init_waitqueue_head(&nor->rww.wait);

	/*
	 * Configure the SPI memory:
	 * - select op codes for (Fast) Read, Page Program and Sector Erase.
	 * - set the number of dummy cycles (mode cycles + wait states).
	 * - set the SPI protocols for register and memory accesses.
	 * - set the number of address bytes.
	 */
	ret = spi_nor_setup(nor, hwcaps);
	if (ret)
		return ret;

	/* Send all the required SPI flash commands to initialize device */
	ret = spi_nor_init(nor);
	if (ret)
		return ret;

	/* No mtd_info fields should be used up to this point. */
	ret = spi_nor_set_mtd_info(nor);
	if (ret)
		return ret;

	dev_dbg(dev, "Manufacturer and device ID: %*phN\n",
		SPI_NOR_MAX_ID_LEN, nor->id);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_nor_scan);

/* Create a spi-mem direct-mapping descriptor covering reads of the array. */
static int spi_nor_create_read_dirmap(struct spi_nor *nor)
{
	struct spi_mem_dirmap_info info = {
		.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 0),
				      SPI_MEM_OP_ADDR(nor->addr_nbytes, 0, 0),
				      SPI_MEM_OP_DUMMY(nor->read_dummy, 0),
				      SPI_MEM_OP_DATA_IN(0, NULL, 0)),
		.offset = 0,
		.length = nor->params->size,
	};
	struct spi_mem_op *op = &info.op_tmpl;

	spi_nor_spimem_setup_op(nor, op, nor->read_proto);

	/* convert the dummy cycles to the number of bytes */
	op->dummy.nbytes = (nor->read_dummy * op->dummy.buswidth) / 8;
	if (spi_nor_protocol_is_dtr(nor->read_proto))
		op->dummy.nbytes *= 2;

	/*
	 * Since spi_nor_spimem_setup_op() only sets buswidth when the number
	 * of data bytes is non-zero, the data buswidth won't be set here. So,
	 * do it explicitly.
	 */
	op->data.buswidth = spi_nor_get_protocol_data_nbits(nor->read_proto);

	nor->dirmap.rdesc = devm_spi_mem_dirmap_create(nor->dev, nor->spimem,
						       &info);
	return PTR_ERR_OR_ZERO(nor->dirmap.rdesc);
}

/* Create a spi-mem direct-mapping descriptor covering writes of the array. */
static int spi_nor_create_write_dirmap(struct spi_nor *nor)
{
	struct spi_mem_dirmap_info info = {
		.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 0),
				      SPI_MEM_OP_ADDR(nor->addr_nbytes, 0, 0),
				      SPI_MEM_OP_NO_DUMMY,
				      SPI_MEM_OP_DATA_OUT(0, NULL, 0)),
		.offset = 0,
		.length = nor->params->size,
	};
	struct spi_mem_op *op = &info.op_tmpl;

	if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
		op->addr.nbytes = 0;

	spi_nor_spimem_setup_op(nor, op, nor->write_proto);

	/*
	 * Since spi_nor_spimem_setup_op() only sets buswidth when the number
	 * of data bytes is non-zero, the data buswidth won't be set here. So,
	 * do it explicitly.
	 */
	op->data.buswidth = spi_nor_get_protocol_data_nbits(nor->write_proto);

	nor->dirmap.wdesc = devm_spi_mem_dirmap_create(nor->dev, nor->spimem,
						       &info);
	return PTR_ERR_OR_ZERO(nor->dirmap.wdesc);
}

static int spi_nor_probe(struct spi_mem *spimem)
{
	struct spi_device *spi = spimem->spi;
	struct device *dev = &spi->dev;
	struct flash_platform_data *data = dev_get_platdata(dev);
	struct spi_nor *nor;
	/*
	 * Enable all caps by default. The core will mask them after
	 * checking what's really supported using spi_mem_supports_op().
	 */
	const struct spi_nor_hwcaps hwcaps = { .mask = SNOR_HWCAPS_ALL };
	char *flash_name;
	int ret;

	ret = devm_regulator_get_enable(dev, "vcc");
	if (ret)
		return ret;

	nor = devm_kzalloc(dev, sizeof(*nor), GFP_KERNEL);
	if (!nor)
		return -ENOMEM;

	nor->spimem = spimem;
	nor->dev = dev;
	spi_nor_set_flash_node(nor, dev->of_node);

	spi_mem_set_drvdata(spimem, nor);

	if (data && data->name)
		nor->mtd.name = data->name;

	if (!nor->mtd.name)
		nor->mtd.name = spi_mem_get_name(spimem);

	/*
	 * For some (historical?) reason many platforms provide two different
	 * names in flash_platform_data: "name" and "type". Quite often name is
	 * set to "m25p80" and then "type" provides a real chip name.
	 * If that's the case, respect "type" and ignore a "name".
	 */
	if (data && data->type)
		flash_name = data->type;
	else if (!strcmp(spi->modalias, "spi-nor"))
		flash_name = NULL; /* auto-detect */
	else
		flash_name = spi->modalias;

	ret = spi_nor_scan(nor, flash_name, &hwcaps);
	if (ret)
		return ret;

	spi_nor_debugfs_register(nor);

	/*
	 * None of the existing parts have > 512B pages, but let's play safe
	 * and add this logic so that if anyone ever adds support for such
	 * a NOR we don't end up with buffer overflows.
	 */
	if (nor->params->page_size > PAGE_SIZE) {
		nor->bouncebuf_size = nor->params->page_size;
		devm_kfree(dev, nor->bouncebuf);
		nor->bouncebuf = devm_kmalloc(dev, nor->bouncebuf_size,
					      GFP_KERNEL);
		if (!nor->bouncebuf)
			return -ENOMEM;
	}

	ret = spi_nor_create_read_dirmap(nor);
	if (ret)
		return ret;

	ret = spi_nor_create_write_dirmap(nor);
	if (ret)
		return ret;

	return mtd_device_register(&nor->mtd, data ? data->parts : NULL,
				   data ? data->nr_parts : 0);
}

static int spi_nor_remove(struct spi_mem *spimem)
{
	struct spi_nor *nor = spi_mem_get_drvdata(spimem);

	spi_nor_restore(nor);

	/* Clean up MTD stuff. */
	return mtd_device_unregister(&nor->mtd);
}

static void spi_nor_shutdown(struct spi_mem *spimem)
{
	struct spi_nor *nor = spi_mem_get_drvdata(spimem);

	spi_nor_restore(nor);
}

/*
 * Do NOT add to this array without reading the following:
 *
 * Historically, many flash devices are bound to this driver by their name. But
 * since most of these flash are compatible to some extent, and their
 * differences can often be differentiated by the JEDEC read-ID command, we
 * encourage new users to add support to the spi-nor library, and simply bind
 * against a generic string here (e.g., "jedec,spi-nor").
 *
 * Many flash names are kept here in this list to keep them available
 * as module aliases for existing platforms.
 */
static const struct spi_device_id spi_nor_dev_ids[] = {
	/*
	 * Allow non-DT platform devices to bind to the "spi-nor" modalias, and
	 * hack around the fact that the SPI core does not provide uevent
	 * matching for .of_match_table
	 */
	{"spi-nor"},

	/*
	 * Entries not used in DTs that should be safe to drop after replacing
	 * them with "spi-nor" in platform data.
	 */
	{"s25sl064a"},	{"w25x16"},	{"m25p10"},	{"m25px64"},

	/*
	 * Entries that were used in DTs without "jedec,spi-nor" fallback and
	 * should be kept for backward compatibility.
	 */
	{"at25df321a"},	{"at25df641"},	{"at26df081a"},
	{"mx25l4005a"},	{"mx25l1606e"},	{"mx25l6405d"},	{"mx25l12805d"},
	{"mx25l25635e"},{"mx66l51235l"},
	{"n25q064"},	{"n25q128a11"},	{"n25q128a13"},	{"n25q512a"},
	{"s25fl256s1"},	{"s25fl512s"},	{"s25sl12801"},	{"s25fl008k"},
	{"s25fl064k"},
	{"sst25vf040b"},{"sst25vf016b"},{"sst25vf032b"},{"sst25wf040"},
	{"m25p40"},	{"m25p80"},	{"m25p16"},	{"m25p32"},
	{"m25p64"},	{"m25p128"},
	{"w25x80"},	{"w25x32"},	{"w25q32"},	{"w25q32dw"},
	{"w25q80bl"},	{"w25q128"},	{"w25q256"},

	/* Flashes that can't be detected using JEDEC */
	{"m25p05-nonjedec"},	{"m25p10-nonjedec"},	{"m25p20-nonjedec"},
	{"m25p40-nonjedec"},	{"m25p80-nonjedec"},	{"m25p16-nonjedec"},
	{"m25p32-nonjedec"},	{"m25p64-nonjedec"},	{"m25p128-nonjedec"},

	/* Everspin MRAMs (non-JEDEC) */
	{ "mr25h128" }, /* 128 Kib, 40 MHz */
	{ "mr25h256" }, /* 256 Kib, 40 MHz */
	{ "mr25h10" },  /*   1 Mib, 40 MHz */
	{ "mr25h40" },  /*   4 Mib, 40 MHz */

	{ },
};
MODULE_DEVICE_TABLE(spi, spi_nor_dev_ids);

static const struct of_device_id spi_nor_of_table[] = {
	/*
	 * Generic compatibility for SPI NOR that can be identified by the
	 * JEDEC READ ID opcode (0x9F). Use this, if possible.
	 */
	{ .compatible = "jedec,spi-nor" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, spi_nor_of_table);

/*
 * REVISIT: many of these chips have deep power-down modes, which
 * should clearly be entered on suspend() to minimize power use.
 * And also when they're otherwise idle...
 */
static struct spi_mem_driver spi_nor_driver = {
	.spidrv = {
		.driver = {
			.name = "spi-nor",
			.of_match_table = spi_nor_of_table,
			.dev_groups = spi_nor_sysfs_groups,
		},
		.id_table = spi_nor_dev_ids,
	},
	.probe = spi_nor_probe,
	.remove = spi_nor_remove,
	.shutdown = spi_nor_shutdown,
};

static int __init spi_nor_module_init(void)
{
	return spi_mem_driver_register(&spi_nor_driver);
}
module_init(spi_nor_module_init);

static void __exit spi_nor_module_exit(void)
{
	spi_mem_driver_unregister(&spi_nor_driver);
	spi_nor_debugfs_shutdown();
}
module_exit(spi_nor_module_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Huang Shijie <shijie8@gmail.com>");
MODULE_AUTHOR("Mike Lavender");
MODULE_DESCRIPTION("framework for SPI NOR");