1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Based on m25p80.c, by Mike Lavender (mike@steroidmicros.com), with 4 * influence from lart.c (Abraham Van Der Merwe) and mtd_dataflash.c 5 * 6 * Copyright (C) 2005, Intec Automation Inc. 7 * Copyright (C) 2014, Freescale Semiconductor, Inc. 8 */ 9 10 #include <linux/err.h> 11 #include <linux/errno.h> 12 #include <linux/delay.h> 13 #include <linux/device.h> 14 #include <linux/math64.h> 15 #include <linux/module.h> 16 #include <linux/mtd/mtd.h> 17 #include <linux/mtd/spi-nor.h> 18 #include <linux/mutex.h> 19 #include <linux/of_platform.h> 20 #include <linux/sched/task_stack.h> 21 #include <linux/sizes.h> 22 #include <linux/slab.h> 23 #include <linux/spi/flash.h> 24 25 #include "core.h" 26 27 /* Define max times to check status register before we give up. */ 28 29 /* 30 * For everything but full-chip erase; probably could be much smaller, but kept 31 * around for safety for now 32 */ 33 #define DEFAULT_READY_WAIT_JIFFIES (40UL * HZ) 34 35 /* 36 * For full-chip erase, calibrated to a 2MB flash (M25P16); should be scaled up 37 * for larger flash 38 */ 39 #define CHIP_ERASE_2MB_READY_WAIT_JIFFIES (40UL * HZ) 40 41 #define SPI_NOR_MAX_ADDR_NBYTES 4 42 43 #define SPI_NOR_SRST_SLEEP_MIN 200 44 #define SPI_NOR_SRST_SLEEP_MAX 400 45 46 /** 47 * spi_nor_get_cmd_ext() - Get the command opcode extension based on the 48 * extension type. 49 * @nor: pointer to a 'struct spi_nor' 50 * @op: pointer to the 'struct spi_mem_op' whose properties 51 * need to be initialized. 52 * 53 * Right now, only "repeat" and "invert" are supported. 54 * 55 * Return: The opcode extension. 
*/
static u8 spi_nor_get_cmd_ext(const struct spi_nor *nor,
			      const struct spi_mem_op *op)
{
	switch (nor->cmd_ext_type) {
	case SPI_NOR_EXT_INVERT:
		/* The extension is the bitwise inverse of the opcode. */
		return ~op->cmd.opcode;

	case SPI_NOR_EXT_REPEAT:
		/* The extension is the opcode itself, sent twice. */
		return op->cmd.opcode;

	default:
		dev_err(nor->dev, "Unknown command extension type\n");
		return 0;
	}
}

/**
 * spi_nor_spimem_setup_op() - Set up common properties of a spi-mem op.
 * @nor:		pointer to a 'struct spi_nor'
 * @op:			pointer to the 'struct spi_mem_op' whose properties
 *			need to be initialized.
 * @proto:		the protocol from which the properties need to be set.
 */
void spi_nor_spimem_setup_op(const struct spi_nor *nor,
			     struct spi_mem_op *op,
			     const enum spi_nor_protocol proto)
{
	u8 ext;

	op->cmd.buswidth = spi_nor_get_protocol_inst_nbits(proto);

	if (op->addr.nbytes)
		op->addr.buswidth = spi_nor_get_protocol_addr_nbits(proto);

	/* Dummy cycles are clocked out on the address lines. */
	if (op->dummy.nbytes)
		op->dummy.buswidth = spi_nor_get_protocol_addr_nbits(proto);

	if (op->data.nbytes)
		op->data.buswidth = spi_nor_get_protocol_data_nbits(proto);

	if (spi_nor_protocol_is_dtr(proto)) {
		/*
		 * SPIMEM supports mixed DTR modes, but right now we can only
		 * have all phases either DTR or STR. IOW, SPIMEM can have
		 * something like 4S-4D-4D, but SPI NOR can't. So, set all 4
		 * phases to either DTR or STR.
		 */
		op->cmd.dtr = true;
		op->addr.dtr = true;
		op->dummy.dtr = true;
		op->data.dtr = true;

		/* 2 bytes per clock cycle in DTR mode. */
		op->dummy.nbytes *= 2;

		/* In DTR mode the command is 2 bytes: opcode + extension. */
		ext = spi_nor_get_cmd_ext(nor, op);
		op->cmd.opcode = (op->cmd.opcode << 8) | ext;
		op->cmd.nbytes = 2;
	}
}

/**
 * spi_nor_spimem_bounce() - check if a bounce buffer is needed for the data
 *                           transfer
 * @nor:        pointer to 'struct spi_nor'
 * @op:         pointer to 'struct spi_mem_op' template for transfer
 *
 * If we have to use the bounce buffer, the data field in @op will be updated.
*
 * Return: true if the bounce buffer is needed, false if not
 */
static bool spi_nor_spimem_bounce(struct spi_nor *nor, struct spi_mem_op *op)
{
	/* op->data.buf.in occupies the same memory as op->data.buf.out */
	if (object_is_on_stack(op->data.buf.in) ||
	    !virt_addr_valid(op->data.buf.in)) {
		/* Clamp the transfer to what the bounce buffer can hold. */
		if (op->data.nbytes > nor->bouncebuf_size)
			op->data.nbytes = nor->bouncebuf_size;
		op->data.buf.in = nor->bouncebuf;
		return true;
	}

	return false;
}

/**
 * spi_nor_spimem_exec_op() - execute a memory operation
 * @nor:        pointer to 'struct spi_nor'
 * @op:         pointer to 'struct spi_mem_op' template for transfer
 *
 * Return: 0 on success, -error otherwise.
 */
static int spi_nor_spimem_exec_op(struct spi_nor *nor, struct spi_mem_op *op)
{
	int error;

	/* Let the controller shrink the op to what it can handle. */
	error = spi_mem_adjust_op_size(nor->spimem, op);
	if (error)
		return error;

	return spi_mem_exec_op(nor->spimem, op);
}

/* Legacy controller_ops paths cannot express DTR register accesses. */
int spi_nor_controller_ops_read_reg(struct spi_nor *nor, u8 opcode,
				    u8 *buf, size_t len)
{
	if (spi_nor_protocol_is_dtr(nor->reg_proto))
		return -EOPNOTSUPP;

	return nor->controller_ops->read_reg(nor, opcode, buf, len);
}

int spi_nor_controller_ops_write_reg(struct spi_nor *nor, u8 opcode,
				     const u8 *buf, size_t len)
{
	if (spi_nor_protocol_is_dtr(nor->reg_proto))
		return -EOPNOTSUPP;

	return nor->controller_ops->write_reg(nor, opcode, buf, len);
}

static int spi_nor_controller_ops_erase(struct spi_nor *nor, loff_t offs)
{
	if (spi_nor_protocol_is_dtr(nor->reg_proto))
		return -EOPNOTSUPP;

	return nor->controller_ops->erase(nor, offs);
}

/**
 * spi_nor_spimem_read_data() - read data from flash's memory region via
 *                              spi-mem
 * @nor:        pointer to 'struct spi_nor'
 * @from:       offset to read from
 * @len:        number of bytes to read
 * @buf:        pointer to dst buffer
 *
 * Return: number of
bytes read successfully, -errno otherwise
 */
static ssize_t spi_nor_spimem_read_data(struct spi_nor *nor, loff_t from,
					size_t len, u8 *buf)
{
	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 0),
			   SPI_MEM_OP_ADDR(nor->addr_nbytes, from, 0),
			   SPI_MEM_OP_DUMMY(nor->read_dummy, 0),
			   SPI_MEM_OP_DATA_IN(len, buf, 0));
	bool usebouncebuf;
	ssize_t nbytes;
	int error;

	spi_nor_spimem_setup_op(nor, &op, nor->read_proto);

	/* convert the dummy cycles to the number of bytes */
	op.dummy.nbytes = (nor->read_dummy * op.dummy.buswidth) / 8;
	if (spi_nor_protocol_is_dtr(nor->read_proto))
		op.dummy.nbytes *= 2;

	/* May redirect op.data.buf.in to nor->bouncebuf and clamp nbytes. */
	usebouncebuf = spi_nor_spimem_bounce(nor, &op);

	if (nor->dirmap.rdesc) {
		nbytes = spi_mem_dirmap_read(nor->dirmap.rdesc, op.addr.val,
					     op.data.nbytes, op.data.buf.in);
	} else {
		error = spi_nor_spimem_exec_op(nor, &op);
		if (error)
			return error;
		nbytes = op.data.nbytes;
	}

	/* Copy the result back out of the bounce buffer. */
	if (usebouncebuf && nbytes > 0)
		memcpy(buf, op.data.buf.in, nbytes);

	return nbytes;
}

/**
 * spi_nor_read_data() - read data from flash memory
 * @nor:        pointer to 'struct spi_nor'
 * @from:       offset to read from
 * @len:        number of bytes to read
 * @buf:        pointer to dst buffer
 *
 * Return: number of bytes read successfully, -errno otherwise
 */
ssize_t spi_nor_read_data(struct spi_nor *nor, loff_t from, size_t len, u8 *buf)
{
	if (nor->spimem)
		return spi_nor_spimem_read_data(nor, from, len, buf);

	return nor->controller_ops->read(nor, from, len, buf);
}

/**
 * spi_nor_spimem_write_data() - write data to flash memory via
 *                               spi-mem
 * @nor:        pointer to 'struct spi_nor'
 * @to:         offset to write to
 * @len:        number of bytes to write
 * @buf:        pointer to src buffer
 *
 * Return: number of bytes written successfully, -errno otherwise
 */
static ssize_t spi_nor_spimem_write_data(struct spi_nor *nor, loff_t to,
					 size_t len, const u8 *buf)
{
	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 0),
			   SPI_MEM_OP_ADDR(nor->addr_nbytes, to, 0),
			   SPI_MEM_OP_NO_DUMMY,
			   SPI_MEM_OP_DATA_OUT(len, buf, 0));
	ssize_t nbytes;
	int error;

	/* SST AAI-WP sends the address only with the first chunk. */
	if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
		op.addr.nbytes = 0;

	spi_nor_spimem_setup_op(nor, &op, nor->write_proto);

	if (spi_nor_spimem_bounce(nor, &op))
		memcpy(nor->bouncebuf, buf, op.data.nbytes);

	if (nor->dirmap.wdesc) {
		nbytes = spi_mem_dirmap_write(nor->dirmap.wdesc, op.addr.val,
					      op.data.nbytes, op.data.buf.out);
	} else {
		error = spi_nor_spimem_exec_op(nor, &op);
		if (error)
			return error;
		nbytes = op.data.nbytes;
	}

	return nbytes;
}

/**
 * spi_nor_write_data() - write data to flash memory
 * @nor:        pointer to 'struct spi_nor'
 * @to:         offset to write to
 * @len:        number of bytes to write
 * @buf:        pointer to src buffer
 *
 * Return: number of bytes written successfully, -errno otherwise
 */
ssize_t spi_nor_write_data(struct spi_nor *nor, loff_t to, size_t len,
			   const u8 *buf)
{
	if (nor->spimem)
		return spi_nor_spimem_write_data(nor, to, len, buf);

	return nor->controller_ops->write(nor, to, len, buf);
}

/**
 * spi_nor_read_any_reg() - read any register from flash memory, nonvolatile or
 * volatile.
 * @nor:        pointer to 'struct spi_nor'.
 * @op:		SPI memory operation. op->data.buf must be DMA-able.
 * @proto:	SPI protocol to use for the register operation.
*
 * Return: zero on success, -errno otherwise
 */
int spi_nor_read_any_reg(struct spi_nor *nor, struct spi_mem_op *op,
			 enum spi_nor_protocol proto)
{
	/* Only the spi-mem path can express an arbitrary register op. */
	if (!nor->spimem)
		return -EOPNOTSUPP;

	spi_nor_spimem_setup_op(nor, op, proto);
	return spi_nor_spimem_exec_op(nor, op);
}

/**
 * spi_nor_write_any_volatile_reg() - write any volatile register to flash
 * memory.
 * @nor:        pointer to 'struct spi_nor'
 * @op:		SPI memory operation. op->data.buf must be DMA-able.
 * @proto:	SPI protocol to use for the register operation.
 *
 * Writing volatile registers are instant according to some manufacturers
 * (Cypress, Micron) and do not need any status polling.
 *
 * Return: zero on success, -errno otherwise
 */
int spi_nor_write_any_volatile_reg(struct spi_nor *nor, struct spi_mem_op *op,
				   enum spi_nor_protocol proto)
{
	int ret;

	if (!nor->spimem)
		return -EOPNOTSUPP;

	/* The write still needs the Write Enable latch set first. */
	ret = spi_nor_write_enable(nor);
	if (ret)
		return ret;
	spi_nor_spimem_setup_op(nor, op, proto);
	return spi_nor_spimem_exec_op(nor, op);
}

/**
 * spi_nor_write_enable() - Set write enable latch with Write Enable command.
 * @nor:	pointer to 'struct spi_nor'.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_write_enable(struct spi_nor *nor)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op = SPI_NOR_WREN_OP;

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WREN,
						       NULL, 0);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d on Write Enable\n", ret);

	return ret;
}

/**
 * spi_nor_write_disable() - Send Write Disable instruction to the chip.
 * @nor:	pointer to 'struct spi_nor'.
 *
 * Return: 0 on success, -errno otherwise.
*/
int spi_nor_write_disable(struct spi_nor *nor)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op = SPI_NOR_WRDI_OP;

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WRDI,
						       NULL, 0);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d on Write Disable\n", ret);

	return ret;
}

/**
 * spi_nor_read_id() - Read the JEDEC ID.
 * @nor:	pointer to 'struct spi_nor'.
 * @naddr:	number of address bytes to send. Can be zero if the operation
 *		does not need to send an address.
 * @ndummy:	number of dummy bytes to send after an opcode or address. Can
 *		be zero if the operation does not require dummy bytes.
 * @id:		pointer to a DMA-able buffer where the value of the JEDEC ID
 *		will be written.
 * @proto:	the SPI protocol for register operation.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_read_id(struct spi_nor *nor, u8 naddr, u8 ndummy, u8 *id,
		    enum spi_nor_protocol proto)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_NOR_READID_OP(naddr, ndummy, id, SPI_NOR_MAX_ID_LEN);

		spi_nor_spimem_setup_op(nor, &op, proto);
		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		/*
		 * NOTE(review): the legacy path ignores @naddr/@ndummy and
		 * issues a plain RDID; controllers using it only support the
		 * standard 1S JEDEC ID read.
		 */
		ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDID, id,
						    SPI_NOR_MAX_ID_LEN);
	}
	return ret;
}

/**
 * spi_nor_read_sr() - Read the Status Register.
 * @nor:	pointer to 'struct spi_nor'.
 * @sr:		pointer to a DMA-able buffer where the value of the
 *		Status Register will be written. Should be at least 2 bytes.
 *
 * Return: 0 on success, -errno otherwise.
*/
int spi_nor_read_sr(struct spi_nor *nor, u8 *sr)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op = SPI_NOR_RDSR_OP(sr);

		if (nor->reg_proto == SNOR_PROTO_8_8_8_DTR) {
			op.addr.nbytes = nor->params->rdsr_addr_nbytes;
			op.dummy.nbytes = nor->params->rdsr_dummy;
			/*
			 * We don't want to read only one byte in DTR mode. So,
			 * read 2 and then discard the second byte.
			 */
			op.data.nbytes = 2;
		}

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDSR, sr,
						      1);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d reading SR\n", ret);

	return ret;
}

/**
 * spi_nor_read_cr() - Read the Configuration Register using the
 * SPINOR_OP_RDCR (35h) command.
 * @nor:	pointer to 'struct spi_nor'
 * @cr:		pointer to a DMA-able buffer where the value of the
 *		Configuration Register will be written.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_read_cr(struct spi_nor *nor, u8 *cr)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op = SPI_NOR_RDCR_OP(cr);

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDCR, cr,
						      1);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d reading CR\n", ret);

	return ret;
}

/**
 * spi_nor_set_4byte_addr_mode_en4b_ex4b() - Enter/Exit 4-byte address mode
 * using SPINOR_OP_EN4B/SPINOR_OP_EX4B. Typically used by
 * Winbond and Macronix.
 * @nor:	pointer to 'struct spi_nor'.
 * @enable:	true to enter the 4-byte address mode, false to exit the 4-byte
 *		address mode.
 *
 * Return: 0 on success, -errno otherwise.
*/
int spi_nor_set_4byte_addr_mode_en4b_ex4b(struct spi_nor *nor, bool enable)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op = SPI_NOR_EN4B_EX4B_OP(enable);

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor,
						       enable ? SPINOR_OP_EN4B :
								SPINOR_OP_EX4B,
						       NULL, 0);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret);

	return ret;
}

/**
 * spi_nor_set_4byte_addr_mode_wren_en4b_ex4b() - Set 4-byte address mode using
 * SPINOR_OP_WREN followed by SPINOR_OP_EN4B or SPINOR_OP_EX4B. Typically used
 * by ST and Micron flashes.
 * @nor:	pointer to 'struct spi_nor'.
 * @enable:	true to enter the 4-byte address mode, false to exit the 4-byte
 *		address mode.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_set_4byte_addr_mode_wren_en4b_ex4b(struct spi_nor *nor, bool enable)
{
	int ret;

	/* These flashes gate EN4B/EX4B behind the Write Enable latch. */
	ret = spi_nor_write_enable(nor);
	if (ret)
		return ret;

	ret = spi_nor_set_4byte_addr_mode_en4b_ex4b(nor, enable);
	if (ret)
		return ret;

	return spi_nor_write_disable(nor);
}

/**
 * spi_nor_set_4byte_addr_mode_brwr() - Set 4-byte address mode using
 * SPINOR_OP_BRWR. Typically used by Spansion flashes.
 * @nor:	pointer to 'struct spi_nor'.
 * @enable:	true to enter the 4-byte address mode, false to exit the 4-byte
 *		address mode.
 *
 * 8-bit volatile bank register used to define A[30:A24] bits. MSB (bit[7]) is
 * used to enable/disable 4-byte address mode. When MSB is set to '1', 4-byte
 * address mode is active and A[30:24] bits are don't care. Write instruction is
 * SPINOR_OP_BRWR(17h) with 1 byte of data.
 *
 * Return: 0 on success, -errno otherwise.
581 */ 582 int spi_nor_set_4byte_addr_mode_brwr(struct spi_nor *nor, bool enable) 583 { 584 int ret; 585 586 nor->bouncebuf[0] = enable << 7; 587 588 if (nor->spimem) { 589 struct spi_mem_op op = SPI_NOR_BRWR_OP(nor->bouncebuf); 590 591 spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); 592 593 ret = spi_mem_exec_op(nor->spimem, &op); 594 } else { 595 ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_BRWR, 596 nor->bouncebuf, 1); 597 } 598 599 if (ret) 600 dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret); 601 602 return ret; 603 } 604 605 /** 606 * spi_nor_sr_ready() - Query the Status Register to see if the flash is ready 607 * for new commands. 608 * @nor: pointer to 'struct spi_nor'. 609 * 610 * Return: 1 if ready, 0 if not ready, -errno on errors. 611 */ 612 int spi_nor_sr_ready(struct spi_nor *nor) 613 { 614 int ret; 615 616 ret = spi_nor_read_sr(nor, nor->bouncebuf); 617 if (ret) 618 return ret; 619 620 return !(nor->bouncebuf[0] & SR_WIP); 621 } 622 623 /** 624 * spi_nor_use_parallel_locking() - Checks if RWW locking scheme shall be used 625 * @nor: pointer to 'struct spi_nor'. 626 * 627 * Return: true if parallel locking is enabled, false otherwise. 
*/
static bool spi_nor_use_parallel_locking(struct spi_nor *nor)
{
	return nor->flags & SNOR_F_RWW;
}

/* Locking helpers for status read operations */
static int spi_nor_rww_start_rdst(struct spi_nor *nor)
{
	struct spi_nor_rww *rww = &nor->rww;
	int ret = -EAGAIN;

	mutex_lock(&nor->lock);

	/* A status read conflicts with any ongoing IO or read. */
	if (rww->ongoing_io || rww->ongoing_rd)
		goto busy;

	rww->ongoing_io = true;
	rww->ongoing_rd = true;
	ret = 0;

busy:
	mutex_unlock(&nor->lock);
	return ret;
}

static void spi_nor_rww_end_rdst(struct spi_nor *nor)
{
	struct spi_nor_rww *rww = &nor->rww;

	mutex_lock(&nor->lock);

	rww->ongoing_io = false;
	rww->ongoing_rd = false;

	mutex_unlock(&nor->lock);
}

static int spi_nor_lock_rdst(struct spi_nor *nor)
{
	if (spi_nor_use_parallel_locking(nor))
		return spi_nor_rww_start_rdst(nor);

	return 0;
}

static void spi_nor_unlock_rdst(struct spi_nor *nor)
{
	if (spi_nor_use_parallel_locking(nor)) {
		spi_nor_rww_end_rdst(nor);
		wake_up(&nor->rww.wait);
	}
}

/**
 * spi_nor_ready() - Query the flash to see if it is ready for new commands.
 * @nor:	pointer to 'struct spi_nor'.
 *
 * Return: 1 if ready, 0 if not ready, -errno on errors.
 */
static int spi_nor_ready(struct spi_nor *nor)
{
	int ret;

	/* If the lock is busy (-EAGAIN), report "not ready" and retry later. */
	ret = spi_nor_lock_rdst(nor);
	if (ret)
		return 0;

	/* Flashes might override the standard routine. */
	if (nor->params->ready)
		ret = nor->params->ready(nor);
	else
		ret = spi_nor_sr_ready(nor);

	spi_nor_unlock_rdst(nor);

	return ret;
}

/**
 * spi_nor_wait_till_ready_with_timeout() - Service routine to read the
 * Status Register until ready, or timeout occurs.
 * @nor:		pointer to "struct spi_nor".
 * @timeout_jiffies:	jiffies to wait until timeout.
 *
 * Return: 0 on success, -errno otherwise.
*/
static int spi_nor_wait_till_ready_with_timeout(struct spi_nor *nor,
						unsigned long timeout_jiffies)
{
	unsigned long deadline;
	int timeout = 0, ret;

	deadline = jiffies + timeout_jiffies;

	while (!timeout) {
		/* Flag first, then poll once more past the deadline. */
		if (time_after_eq(jiffies, deadline))
			timeout = 1;

		ret = spi_nor_ready(nor);
		if (ret < 0)
			return ret;
		if (ret)
			return 0;

		cond_resched();
	}

	dev_dbg(nor->dev, "flash operation timed out\n");

	return -ETIMEDOUT;
}

/**
 * spi_nor_wait_till_ready() - Wait for a predefined amount of time for the
 * flash to be ready, or timeout occurs.
 * @nor:	pointer to "struct spi_nor".
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_wait_till_ready(struct spi_nor *nor)
{
	return spi_nor_wait_till_ready_with_timeout(nor,
						    DEFAULT_READY_WAIT_JIFFIES);
}

/**
 * spi_nor_global_block_unlock() - Unlock Global Block Protection.
 * @nor:	pointer to 'struct spi_nor'.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_global_block_unlock(struct spi_nor *nor)
{
	int ret;

	ret = spi_nor_write_enable(nor);
	if (ret)
		return ret;

	if (nor->spimem) {
		struct spi_mem_op op = SPI_NOR_GBULK_OP;

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_GBULK,
						       NULL, 0);
	}

	if (ret) {
		dev_dbg(nor->dev, "error %d on Global Block Unlock\n", ret);
		return ret;
	}

	return spi_nor_wait_till_ready(nor);
}

/**
 * spi_nor_write_sr() - Write the Status Register.
 * @nor:	pointer to 'struct spi_nor'.
 * @sr:		pointer to DMA-able buffer to write to the Status Register.
 * @len:	number of bytes to write to the Status Register.
 *
 * Return: 0 on success, -errno otherwise.
*/
int spi_nor_write_sr(struct spi_nor *nor, const u8 *sr, size_t len)
{
	int ret;

	ret = spi_nor_write_enable(nor);
	if (ret)
		return ret;

	if (nor->spimem) {
		struct spi_mem_op op = SPI_NOR_WRSR_OP(sr, len);

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WRSR, sr,
						       len);
	}

	if (ret) {
		dev_dbg(nor->dev, "error %d writing SR\n", ret);
		return ret;
	}

	/* WRSR is a self-timed write cycle; poll until it completes. */
	return spi_nor_wait_till_ready(nor);
}

/**
 * spi_nor_write_sr1_and_check() - Write one byte to the Status Register 1 and
 * ensure that the byte written match the received value.
 * @nor:	pointer to a 'struct spi_nor'.
 * @sr1:	byte value to be written to the Status Register.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_write_sr1_and_check(struct spi_nor *nor, u8 sr1)
{
	int ret;

	nor->bouncebuf[0] = sr1;

	ret = spi_nor_write_sr(nor, nor->bouncebuf, 1);
	if (ret)
		return ret;

	/* Read back to verify the write actually stuck. */
	ret = spi_nor_read_sr(nor, nor->bouncebuf);
	if (ret)
		return ret;

	if (nor->bouncebuf[0] != sr1) {
		dev_dbg(nor->dev, "SR1: read back test failed\n");
		return -EIO;
	}

	return 0;
}

/**
 * spi_nor_write_16bit_sr_and_check() - Write the Status Register 1 and the
 * Status Register 2 in one shot. Ensure that the byte written in the Status
 * Register 1 match the received value, and that the 16-bit Write did not
 * affect what was already in the Status Register 2.
 * @nor:	pointer to a 'struct spi_nor'.
 * @sr1:	byte value to be written to the Status Register 1.
 *
 * Return: 0 on success, -errno otherwise.
*/
static int spi_nor_write_16bit_sr_and_check(struct spi_nor *nor, u8 sr1)
{
	int ret;
	u8 *sr_cr = nor->bouncebuf;
	u8 cr_written;

	/* Make sure we don't overwrite the contents of Status Register 2. */
	if (!(nor->flags & SNOR_F_NO_READ_CR)) {
		ret = spi_nor_read_cr(nor, &sr_cr[1]);
		if (ret)
			return ret;
	} else if (spi_nor_get_protocol_width(nor->read_proto) == 4 &&
		   spi_nor_get_protocol_width(nor->write_proto) == 4 &&
		   nor->params->quad_enable) {
		/*
		 * If the Status Register 2 Read command (35h) is not
		 * supported, we should at least be sure we don't
		 * change the value of the SR2 Quad Enable bit.
		 *
		 * When the Quad Enable method is set and the buswidth is 4, we
		 * can safely assume that the value of the QE bit is one, as a
		 * consequence of the nor->params->quad_enable() call.
		 *
		 * According to the JESD216 revB standard, BFPT DWORDS[15],
		 * bits 22:20, the 16-bit Write Status (01h) command is
		 * available just for the cases in which the QE bit is
		 * described in SR2 at BIT(1).
		 */
		sr_cr[1] = SR2_QUAD_EN_BIT1;
	} else {
		sr_cr[1] = 0;
	}

	sr_cr[0] = sr1;

	ret = spi_nor_write_sr(nor, sr_cr, 2);
	if (ret)
		return ret;

	/* Verify SR1 took the requested value. */
	ret = spi_nor_read_sr(nor, sr_cr);
	if (ret)
		return ret;

	if (sr1 != sr_cr[0]) {
		dev_dbg(nor->dev, "SR: Read back test failed\n");
		return -EIO;
	}

	/* Without RDCR there is no way to verify CR; trust the write. */
	if (nor->flags & SNOR_F_NO_READ_CR)
		return 0;

	cr_written = sr_cr[1];

	ret = spi_nor_read_cr(nor, &sr_cr[1]);
	if (ret)
		return ret;

	if (cr_written != sr_cr[1]) {
		dev_dbg(nor->dev, "CR: read back test failed\n");
		return -EIO;
	}

	return 0;
}

/**
 * spi_nor_write_16bit_cr_and_check() - Write the Status Register 1 and the
 * Configuration Register in one shot.
Ensure that the byte written in the
 * Configuration Register match the received value, and that the 16-bit Write
 * did not affect what was already in the Status Register 1.
 * @nor:	pointer to a 'struct spi_nor'.
 * @cr:		byte value to be written to the Configuration Register.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_write_16bit_cr_and_check(struct spi_nor *nor, u8 cr)
{
	int ret;
	u8 *sr_cr = nor->bouncebuf;
	u8 sr_written;

	/* Keep the current value of the Status Register 1. */
	ret = spi_nor_read_sr(nor, sr_cr);
	if (ret)
		return ret;

	sr_cr[1] = cr;

	ret = spi_nor_write_sr(nor, sr_cr, 2);
	if (ret)
		return ret;

	sr_written = sr_cr[0];

	/* Verify SR1 was not disturbed by the 16-bit write. */
	ret = spi_nor_read_sr(nor, sr_cr);
	if (ret)
		return ret;

	if (sr_written != sr_cr[0]) {
		dev_dbg(nor->dev, "SR: Read back test failed\n");
		return -EIO;
	}

	/* Without RDCR there is no way to verify CR; trust the write. */
	if (nor->flags & SNOR_F_NO_READ_CR)
		return 0;

	ret = spi_nor_read_cr(nor, &sr_cr[1]);
	if (ret)
		return ret;

	if (cr != sr_cr[1]) {
		dev_dbg(nor->dev, "CR: read back test failed\n");
		return -EIO;
	}

	return 0;
}

/**
 * spi_nor_write_sr_and_check() - Write the Status Register 1 and ensure that
 * the byte written match the received value without affecting other bits in the
 * Status Register 1 and 2.
 * @nor:	pointer to a 'struct spi_nor'.
 * @sr1:	byte value to be written to the Status Register.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_write_sr_and_check(struct spi_nor *nor, u8 sr1)
{
	if (nor->flags & SNOR_F_HAS_16BIT_SR)
		return spi_nor_write_16bit_sr_and_check(nor, sr1);

	return spi_nor_write_sr1_and_check(nor, sr1);
}

/**
 * spi_nor_write_sr2() - Write the Status Register 2 using the
 * SPINOR_OP_WRSR2 (3eh) command.
 * @nor:	pointer to 'struct spi_nor'.
* @sr2:	pointer to DMA-able buffer to write to the Status Register 2.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_write_sr2(struct spi_nor *nor, const u8 *sr2)
{
	int ret;

	ret = spi_nor_write_enable(nor);
	if (ret)
		return ret;

	if (nor->spimem) {
		struct spi_mem_op op = SPI_NOR_WRSR2_OP(sr2);

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WRSR2,
						       sr2, 1);
	}

	if (ret) {
		dev_dbg(nor->dev, "error %d writing SR2\n", ret);
		return ret;
	}

	/* WRSR2 is a self-timed write cycle; poll until it completes. */
	return spi_nor_wait_till_ready(nor);
}

/**
 * spi_nor_read_sr2() - Read the Status Register 2 using the
 * SPINOR_OP_RDSR2 (3fh) command.
 * @nor:	pointer to 'struct spi_nor'.
 * @sr2:	pointer to DMA-able buffer where the value of the
 *		Status Register 2 will be written.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_read_sr2(struct spi_nor *nor, u8 *sr2)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op = SPI_NOR_RDSR2_OP(sr2);

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDSR2, sr2,
						      1);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d reading SR2\n", ret);

	return ret;
}

/**
 * spi_nor_erase_die() - Erase the entire die.
 * @nor:	pointer to 'struct spi_nor'.
 * @addr:	address of the die.
 * @die_size:	size of the die.
 *
 * Return: 0 on success, -errno otherwise.
*/
static int spi_nor_erase_die(struct spi_nor *nor, loff_t addr, size_t die_size)
{
	/* A die smaller than the whole device implies a multi-die flash. */
	bool multi_die = nor->mtd.size != die_size;
	int ret;

	dev_dbg(nor->dev, " %lldKiB\n", (long long)(die_size >> 10));

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_NOR_DIE_ERASE_OP(nor->params->die_erase_opcode,
					     nor->addr_nbytes, addr, multi_die);

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		/* Legacy controllers only know the whole-chip erase opcode. */
		if (multi_die)
			return -EOPNOTSUPP;

		ret = spi_nor_controller_ops_write_reg(nor,
						       SPINOR_OP_CHIP_ERASE,
						       NULL, 0);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d erasing chip\n", ret);

	return ret;
}

/*
 * spi_nor_convert_opcode() - Look up @opcode in a 2-column conversion
 * @table of @size rows and return the converted opcode, or @opcode itself
 * when no entry matches.
 */
static u8 spi_nor_convert_opcode(u8 opcode, const u8 table[][2], size_t size)
{
	size_t i;

	for (i = 0; i < size; i++)
		if (table[i][0] == opcode)
			return table[i][1];

	/* No conversion found, keep input op code. */
	return opcode;
}

/* Convert a 3-byte-address read opcode to its 4-byte-address equivalent. */
u8 spi_nor_convert_3to4_read(u8 opcode)
{
	static const u8 spi_nor_3to4_read[][2] = {
		{ SPINOR_OP_READ,	SPINOR_OP_READ_4B },
		{ SPINOR_OP_READ_FAST,	SPINOR_OP_READ_FAST_4B },
		{ SPINOR_OP_READ_1_1_2,	SPINOR_OP_READ_1_1_2_4B },
		{ SPINOR_OP_READ_1_2_2,	SPINOR_OP_READ_1_2_2_4B },
		{ SPINOR_OP_READ_1_1_4,	SPINOR_OP_READ_1_1_4_4B },
		{ SPINOR_OP_READ_1_4_4,	SPINOR_OP_READ_1_4_4_4B },
		{ SPINOR_OP_READ_1_1_8,	SPINOR_OP_READ_1_1_8_4B },
		{ SPINOR_OP_READ_1_8_8,	SPINOR_OP_READ_1_8_8_4B },

		{ SPINOR_OP_READ_1_1_1_DTR,	SPINOR_OP_READ_1_1_1_DTR_4B },
		{ SPINOR_OP_READ_1_2_2_DTR,	SPINOR_OP_READ_1_2_2_DTR_4B },
		{ SPINOR_OP_READ_1_4_4_DTR,	SPINOR_OP_READ_1_4_4_DTR_4B },
	};

	return spi_nor_convert_opcode(opcode, spi_nor_3to4_read,
				      ARRAY_SIZE(spi_nor_3to4_read));
}

/* Convert a 3-byte-address program opcode to its 4-byte-address equivalent. */
static u8 spi_nor_convert_3to4_program(u8 opcode)
{
	static const u8 spi_nor_3to4_program[][2] = {
		{ SPINOR_OP_PP,		SPINOR_OP_PP_4B },
		{ SPINOR_OP_PP_1_1_4,	SPINOR_OP_PP_1_1_4_4B },
		{ SPINOR_OP_PP_1_4_4,	SPINOR_OP_PP_1_4_4_4B },
		{ SPINOR_OP_PP_1_1_8,	SPINOR_OP_PP_1_1_8_4B },
		{ SPINOR_OP_PP_1_8_8,	SPINOR_OP_PP_1_8_8_4B },
	};

	return spi_nor_convert_opcode(opcode, spi_nor_3to4_program,
				      ARRAY_SIZE(spi_nor_3to4_program));
}

/* Convert a 3-byte-address erase opcode to its 4-byte-address equivalent. */
static u8 spi_nor_convert_3to4_erase(u8 opcode)
{
	static const u8 spi_nor_3to4_erase[][2] = {
		{ SPINOR_OP_BE_4K,	SPINOR_OP_BE_4K_4B },
		{ SPINOR_OP_BE_32K,	SPINOR_OP_BE_32K_4B },
		{ SPINOR_OP_SE,		SPINOR_OP_SE_4B },
	};

	return spi_nor_convert_opcode(opcode, spi_nor_3to4_erase,
				      ARRAY_SIZE(spi_nor_3to4_erase));
}

static bool spi_nor_has_uniform_erase(const struct spi_nor *nor)
{
	return !!nor->params->erase_map.uniform_erase_type;
}

/* Switch all read/program/erase opcodes to their 4-byte-address variants. */
static void spi_nor_set_4byte_opcodes(struct spi_nor *nor)
{
	nor->read_opcode = spi_nor_convert_3to4_read(nor->read_opcode);
	nor->program_opcode = spi_nor_convert_3to4_program(nor->program_opcode);
	nor->erase_opcode = spi_nor_convert_3to4_erase(nor->erase_opcode);

	if (!spi_nor_has_uniform_erase(nor)) {
		struct spi_nor_erase_map *map = &nor->params->erase_map;
		struct spi_nor_erase_type *erase;
		int i;

		/* Non-uniform maps carry per-region erase opcodes too. */
		for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
			erase = &map->erase_type[i];
			erase->opcode =
				spi_nor_convert_3to4_erase(erase->opcode);
		}
	}
}

static int spi_nor_prep(struct spi_nor *nor)
{
	int ret = 0;

	if (nor->controller_ops && nor->controller_ops->prepare)
		ret = nor->controller_ops->prepare(nor);

	return ret;
}

static void spi_nor_unprep(struct spi_nor *nor)
{
	if (nor->controller_ops && nor->controller_ops->unprepare)
		nor->controller_ops->unprepare(nor);
}

/* Map a [start, start + len) byte range to its first and last bank index. */
static void spi_nor_offset_to_banks(u64 bank_size, loff_t start, size_t len,
				    u8 *first, u8 *last)
{
	/* This is currently safe, the number of banks being very small */
	*first = DIV_ROUND_DOWN_ULL(start, bank_size);
	*last = DIV_ROUND_DOWN_ULL(start + len - 1, bank_size);
}

/* Generic helpers for internal locking and serialization */
static bool spi_nor_rww_start_io(struct spi_nor *nor)
{
	struct spi_nor_rww *rww = &nor->rww;
	bool start = false;

	mutex_lock(&nor->lock);

	if (rww->ongoing_io)
		goto busy;

	rww->ongoing_io = true;
	start = true;

busy:
	mutex_unlock(&nor->lock);
	return start;
}

static void spi_nor_rww_end_io(struct spi_nor *nor)
{
	mutex_lock(&nor->lock);
	nor->rww.ongoing_io = false;
	mutex_unlock(&nor->lock);
}

static int spi_nor_lock_device(struct spi_nor *nor)
{
	if (!spi_nor_use_parallel_locking(nor))
		return 0;

	/* Killable wait: a fatal signal aborts with -ERESTARTSYS. */
	return wait_event_killable(nor->rww.wait, spi_nor_rww_start_io(nor));
}

static void spi_nor_unlock_device(struct spi_nor *nor)
{
	if (spi_nor_use_parallel_locking(nor)) {
		spi_nor_rww_end_io(nor);
		wake_up(&nor->rww.wait);
	}
}

/* Locking helpers for exclusive (IO + read + program/erase) access */
static bool spi_nor_rww_start_exclusive(struct spi_nor *nor)
{
	struct spi_nor_rww *rww = &nor->rww;
	bool start = false;

	mutex_lock(&nor->lock);

	if (rww->ongoing_io || rww->ongoing_rd || rww->ongoing_pe)
		goto busy;

	rww->ongoing_io = true;
	rww->ongoing_rd = true;
	rww->ongoing_pe = true;
	start = true;

busy:
	mutex_unlock(&nor->lock);
	return start;
}

static void spi_nor_rww_end_exclusive(struct spi_nor *nor)
{
	struct spi_nor_rww *rww = &nor->rww;

	mutex_lock(&nor->lock);
	rww->ongoing_io = false;
	rww->ongoing_rd = false;
	rww->ongoing_pe = false;
	mutex_unlock(&nor->lock);
}

int spi_nor_prep_and_lock(struct spi_nor *nor)
{
	int ret;

	ret = spi_nor_prep(nor);
	if (ret)
		return ret;

	if (!spi_nor_use_parallel_locking(nor))
		mutex_lock(&nor->lock);
	else
		ret = wait_event_killable(nor->rww.wait,
					  spi_nor_rww_start_exclusive(nor));

	return ret;
}

void spi_nor_unlock_and_unprep(struct spi_nor *nor)
{
	if (!spi_nor_use_parallel_locking(nor)) {
		mutex_unlock(&nor->lock);
	} else {
		spi_nor_rww_end_exclusive(nor);
		wake_up(&nor->rww.wait);
	}

	spi_nor_unprep(nor);
}

/* Internal locking helpers for program and erase operations */
static bool spi_nor_rww_start_pe(struct spi_nor *nor, loff_t start, size_t len)
{
	struct spi_nor_rww *rww = &nor->rww;
	unsigned int used_banks = 0;
	bool started = false;
	u8 first, last;
	int bank;

mutex_lock(&nor->lock); 1320 1321 if (rww->ongoing_io || rww->ongoing_rd || rww->ongoing_pe) 1322 goto busy; 1323 1324 spi_nor_offset_to_banks(nor->params->bank_size, start, len, &first, &last); 1325 for (bank = first; bank <= last; bank++) { 1326 if (rww->used_banks & BIT(bank)) 1327 goto busy; 1328 1329 used_banks |= BIT(bank); 1330 } 1331 1332 rww->used_banks |= used_banks; 1333 rww->ongoing_pe = true; 1334 started = true; 1335 1336 busy: 1337 mutex_unlock(&nor->lock); 1338 return started; 1339 } 1340 1341 static void spi_nor_rww_end_pe(struct spi_nor *nor, loff_t start, size_t len) 1342 { 1343 struct spi_nor_rww *rww = &nor->rww; 1344 u8 first, last; 1345 int bank; 1346 1347 mutex_lock(&nor->lock); 1348 1349 spi_nor_offset_to_banks(nor->params->bank_size, start, len, &first, &last); 1350 for (bank = first; bank <= last; bank++) 1351 rww->used_banks &= ~BIT(bank); 1352 1353 rww->ongoing_pe = false; 1354 1355 mutex_unlock(&nor->lock); 1356 } 1357 1358 static int spi_nor_prep_and_lock_pe(struct spi_nor *nor, loff_t start, size_t len) 1359 { 1360 int ret; 1361 1362 ret = spi_nor_prep(nor); 1363 if (ret) 1364 return ret; 1365 1366 if (!spi_nor_use_parallel_locking(nor)) 1367 mutex_lock(&nor->lock); 1368 else 1369 ret = wait_event_killable(nor->rww.wait, 1370 spi_nor_rww_start_pe(nor, start, len)); 1371 1372 return ret; 1373 } 1374 1375 static void spi_nor_unlock_and_unprep_pe(struct spi_nor *nor, loff_t start, size_t len) 1376 { 1377 if (!spi_nor_use_parallel_locking(nor)) { 1378 mutex_unlock(&nor->lock); 1379 } else { 1380 spi_nor_rww_end_pe(nor, start, len); 1381 wake_up(&nor->rww.wait); 1382 } 1383 1384 spi_nor_unprep(nor); 1385 } 1386 1387 /* Internal locking helpers for read operations */ 1388 static bool spi_nor_rww_start_rd(struct spi_nor *nor, loff_t start, size_t len) 1389 { 1390 struct spi_nor_rww *rww = &nor->rww; 1391 unsigned int used_banks = 0; 1392 bool started = false; 1393 u8 first, last; 1394 int bank; 1395 1396 mutex_lock(&nor->lock); 1397 1398 if 
(rww->ongoing_io || rww->ongoing_rd) 1399 goto busy; 1400 1401 spi_nor_offset_to_banks(nor->params->bank_size, start, len, &first, &last); 1402 for (bank = first; bank <= last; bank++) { 1403 if (rww->used_banks & BIT(bank)) 1404 goto busy; 1405 1406 used_banks |= BIT(bank); 1407 } 1408 1409 rww->used_banks |= used_banks; 1410 rww->ongoing_io = true; 1411 rww->ongoing_rd = true; 1412 started = true; 1413 1414 busy: 1415 mutex_unlock(&nor->lock); 1416 return started; 1417 } 1418 1419 static void spi_nor_rww_end_rd(struct spi_nor *nor, loff_t start, size_t len) 1420 { 1421 struct spi_nor_rww *rww = &nor->rww; 1422 u8 first, last; 1423 int bank; 1424 1425 mutex_lock(&nor->lock); 1426 1427 spi_nor_offset_to_banks(nor->params->bank_size, start, len, &first, &last); 1428 for (bank = first; bank <= last; bank++) 1429 nor->rww.used_banks &= ~BIT(bank); 1430 1431 rww->ongoing_io = false; 1432 rww->ongoing_rd = false; 1433 1434 mutex_unlock(&nor->lock); 1435 } 1436 1437 static int spi_nor_prep_and_lock_rd(struct spi_nor *nor, loff_t start, size_t len) 1438 { 1439 int ret; 1440 1441 ret = spi_nor_prep(nor); 1442 if (ret) 1443 return ret; 1444 1445 if (!spi_nor_use_parallel_locking(nor)) 1446 mutex_lock(&nor->lock); 1447 else 1448 ret = wait_event_killable(nor->rww.wait, 1449 spi_nor_rww_start_rd(nor, start, len)); 1450 1451 return ret; 1452 } 1453 1454 static void spi_nor_unlock_and_unprep_rd(struct spi_nor *nor, loff_t start, size_t len) 1455 { 1456 if (!spi_nor_use_parallel_locking(nor)) { 1457 mutex_unlock(&nor->lock); 1458 } else { 1459 spi_nor_rww_end_rd(nor, start, len); 1460 wake_up(&nor->rww.wait); 1461 } 1462 1463 spi_nor_unprep(nor); 1464 } 1465 1466 static u32 spi_nor_convert_addr(struct spi_nor *nor, loff_t addr) 1467 { 1468 if (!nor->params->convert_addr) 1469 return addr; 1470 1471 return nor->params->convert_addr(nor, addr); 1472 } 1473 1474 /* 1475 * Initiate the erasure of a single sector 1476 */ 1477 int spi_nor_erase_sector(struct spi_nor *nor, u32 addr) 
1478 { 1479 int i; 1480 1481 addr = spi_nor_convert_addr(nor, addr); 1482 1483 if (nor->spimem) { 1484 struct spi_mem_op op = 1485 SPI_NOR_SECTOR_ERASE_OP(nor->erase_opcode, 1486 nor->addr_nbytes, addr); 1487 1488 spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); 1489 1490 return spi_mem_exec_op(nor->spimem, &op); 1491 } else if (nor->controller_ops->erase) { 1492 return spi_nor_controller_ops_erase(nor, addr); 1493 } 1494 1495 /* 1496 * Default implementation, if driver doesn't have a specialized HW 1497 * control 1498 */ 1499 for (i = nor->addr_nbytes - 1; i >= 0; i--) { 1500 nor->bouncebuf[i] = addr & 0xff; 1501 addr >>= 8; 1502 } 1503 1504 return spi_nor_controller_ops_write_reg(nor, nor->erase_opcode, 1505 nor->bouncebuf, nor->addr_nbytes); 1506 } 1507 1508 /** 1509 * spi_nor_div_by_erase_size() - calculate remainder and update new dividend 1510 * @erase: pointer to a structure that describes a SPI NOR erase type 1511 * @dividend: dividend value 1512 * @remainder: pointer to u32 remainder (will be updated) 1513 * 1514 * Return: the result of the division 1515 */ 1516 static u64 spi_nor_div_by_erase_size(const struct spi_nor_erase_type *erase, 1517 u64 dividend, u32 *remainder) 1518 { 1519 /* JEDEC JESD216B Standard imposes erase sizes to be power of 2. */ 1520 *remainder = (u32)dividend & erase->size_mask; 1521 return dividend >> erase->size_shift; 1522 } 1523 1524 /** 1525 * spi_nor_find_best_erase_type() - find the best erase type for the given 1526 * offset in the serial flash memory and the 1527 * number of bytes to erase. The region in 1528 * which the address fits is expected to be 1529 * provided. 1530 * @map: the erase map of the SPI NOR 1531 * @region: pointer to a structure that describes a SPI NOR erase region 1532 * @addr: offset in the serial flash memory 1533 * @len: number of bytes to erase 1534 * 1535 * Return: a pointer to the best fitted erase type, NULL otherwise. 
 */
static const struct spi_nor_erase_type *
spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map,
			     const struct spi_nor_erase_region *region,
			     u64 addr, u32 len)
{
	const struct spi_nor_erase_type *erase;
	u32 rem;
	int i;
	u8 erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;

	/*
	 * Erase types are ordered by size, with the smallest erase type at
	 * index 0. Walk from the largest down so the biggest fitting erase
	 * wins.
	 */
	for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
		/* Does the erase region support the tested erase type? */
		if (!(erase_mask & BIT(i)))
			continue;

		erase = &map->erase_type[i];
		if (!erase->size)
			continue;

		/* Alignment is not mandatory for overlaid regions */
		if (region->offset & SNOR_OVERLAID_REGION &&
		    region->size <= len)
			return erase;

		/* Don't erase more than what the user has asked for. */
		if (erase->size > len)
			continue;

		/* Reject erase types @addr is not aligned to. */
		spi_nor_div_by_erase_size(erase, addr, &rem);
		if (!rem)
			return erase;
	}

	return NULL;
}

/* Non-zero when @region carries the SNOR_LAST_REGION flag bit. */
static u64 spi_nor_region_is_last(const struct spi_nor_erase_region *region)
{
	return region->offset & SNOR_LAST_REGION;
}

/* First address past @region; flag bits are masked out of the offset. */
static u64 spi_nor_region_end(const struct spi_nor_erase_region *region)
{
	return (region->offset & ~SNOR_ERASE_FLAGS_MASK) + region->size;
}

/**
 * spi_nor_region_next() - get the next spi nor region
 * @region:	pointer to a structure that describes a SPI NOR erase region
 *
 * Return: the next spi nor region or NULL if last region.
 */
struct spi_nor_erase_region *
spi_nor_region_next(struct spi_nor_erase_region *region)
{
	if (spi_nor_region_is_last(region))
		return NULL;
	/* Regions are laid out contiguously in memory. */
	region++;
	return region;
}

/**
 * spi_nor_find_erase_region() - find the region of the serial flash memory in
 *				 which the offset fits
 * @map:	the erase map of the SPI NOR
 * @addr:	offset in the serial flash memory
 *
 * Return: a pointer to the spi_nor_erase_region struct, ERR_PTR(-errno)
 *	   otherwise.
 */
static struct spi_nor_erase_region *
spi_nor_find_erase_region(const struct spi_nor_erase_map *map, u64 addr)
{
	struct spi_nor_erase_region *region = map->regions;
	u64 region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
	u64 region_end = region_start + region->size;

	/* Linear walk; the number of regions is expected to be small. */
	while (addr < region_start || addr >= region_end) {
		region = spi_nor_region_next(region);
		if (!region)
			return ERR_PTR(-EINVAL);

		region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
		region_end = region_start + region->size;
	}

	return region;
}

/**
 * spi_nor_init_erase_cmd() - initialize an erase command
 * @region:	pointer to a structure that describes a SPI NOR erase region
 * @erase:	pointer to a structure that describes a SPI NOR erase type
 *
 * Return: the pointer to the allocated erase command, ERR_PTR(-errno)
 * otherwise.
 */
static struct spi_nor_erase_command *
spi_nor_init_erase_cmd(const struct spi_nor_erase_region *region,
		       const struct spi_nor_erase_type *erase)
{
	struct spi_nor_erase_command *cmd;

	cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&cmd->list);
	cmd->opcode = erase->opcode;
	cmd->count = 1;

	/* For overlaid regions the command spans the whole region. */
	if (region->offset & SNOR_OVERLAID_REGION)
		cmd->size = region->size;
	else
		cmd->size = erase->size;

	return cmd;
}

/**
 * spi_nor_destroy_erase_cmd_list() - destroy erase command list
 * @erase_list:	list of erase commands
 */
static void spi_nor_destroy_erase_cmd_list(struct list_head *erase_list)
{
	struct spi_nor_erase_command *cmd, *next;

	list_for_each_entry_safe(cmd, next, erase_list, list) {
		list_del(&cmd->list);
		kfree(cmd);
	}
}

/**
 * spi_nor_init_erase_cmd_list() - initialize erase command list
 * @nor:	pointer to a 'struct spi_nor'
 * @erase_list:	list of erase commands to be executed once we validate that the
 *		erase can be performed
 * @addr:	offset in the serial flash memory
 * @len:	number of bytes to erase
 *
 * Builds the list of best fitted erase commands and verifies if the erase can
 * be performed.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_init_erase_cmd_list(struct spi_nor *nor,
				       struct list_head *erase_list,
				       u64 addr, u32 len)
{
	const struct spi_nor_erase_map *map = &nor->params->erase_map;
	const struct spi_nor_erase_type *erase, *prev_erase = NULL;
	struct spi_nor_erase_region *region;
	struct spi_nor_erase_command *cmd = NULL;
	u64 region_end;
	int ret = -EINVAL;

	region = spi_nor_find_erase_region(map, addr);
	if (IS_ERR(region))
		return PTR_ERR(region);

	region_end = spi_nor_region_end(region);

	while (len) {
		erase = spi_nor_find_best_erase_type(map, region, addr, len);
		if (!erase)
			goto destroy_erase_cmd_list;

		/*
		 * Start a new command when the erase type changes, otherwise
		 * coalesce into the current one by bumping its repeat count.
		 * On the first iteration prev_erase is NULL, so a command is
		 * always allocated before cmd is dereferenced.
		 */
		if (prev_erase != erase ||
		    erase->size != cmd->size ||
		    region->offset & SNOR_OVERLAID_REGION) {
			cmd = spi_nor_init_erase_cmd(region, erase);
			if (IS_ERR(cmd)) {
				ret = PTR_ERR(cmd);
				goto destroy_erase_cmd_list;
			}

			list_add_tail(&cmd->list, erase_list);
		} else {
			cmd->count++;
		}

		addr += cmd->size;
		len -= cmd->size;

		/* Advance to the next region when this one is exhausted. */
		if (len && addr >= region_end) {
			region = spi_nor_region_next(region);
			if (!region)
				goto destroy_erase_cmd_list;
			region_end = spi_nor_region_end(region);
		}

		prev_erase = erase;
	}

	return 0;

destroy_erase_cmd_list:
	spi_nor_destroy_erase_cmd_list(erase_list);
	return ret;
}

/**
 * spi_nor_erase_multi_sectors() - perform a non-uniform erase
 * @nor:	pointer to a 'struct spi_nor'
 * @addr:	offset in the serial flash memory
 * @len:	number of bytes to erase
 *
 * Build a list of best fitted erase commands and execute it once we validate
 * that the erase can be performed.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_erase_multi_sectors(struct spi_nor *nor, u64 addr, u32 len)
{
	LIST_HEAD(erase_list);
	struct spi_nor_erase_command *cmd, *next;
	int ret;

	ret = spi_nor_init_erase_cmd_list(nor, &erase_list, addr, len);
	if (ret)
		return ret;

	list_for_each_entry_safe(cmd, next, &erase_list, list) {
		nor->erase_opcode = cmd->opcode;
		while (cmd->count) {
			dev_vdbg(nor->dev, "erase_cmd->size = 0x%08x, erase_cmd->opcode = 0x%02x, erase_cmd->count = %u\n",
				 cmd->size, cmd->opcode, cmd->count);

			/* Per-sector sequence: lock, WREN, erase, wait. */
			ret = spi_nor_lock_device(nor);
			if (ret)
				goto destroy_erase_cmd_list;

			ret = spi_nor_write_enable(nor);
			if (ret) {
				spi_nor_unlock_device(nor);
				goto destroy_erase_cmd_list;
			}

			ret = spi_nor_erase_sector(nor, addr);
			spi_nor_unlock_device(nor);
			if (ret)
				goto destroy_erase_cmd_list;

			ret = spi_nor_wait_till_ready(nor);
			if (ret)
				goto destroy_erase_cmd_list;

			addr += cmd->size;
			cmd->count--;
		}
		list_del(&cmd->list);
		kfree(cmd);
	}

	return 0;

destroy_erase_cmd_list:
	spi_nor_destroy_erase_cmd_list(&erase_list);
	return ret;
}

/* Erase @len bytes starting at @addr, one die (or whole chip) at a time. */
static int spi_nor_erase_dice(struct spi_nor *nor, loff_t addr,
			      size_t len, size_t die_size)
{
	unsigned long timeout;
	int ret;

	/*
	 * Scale the timeout linearly with the size of the flash, with
	 * a minimum calibrated to an old 2MB flash. We could try to
	 * pull these from CFI/SFDP, but these values should be good
	 * enough for now.
	 */
	timeout = max(CHIP_ERASE_2MB_READY_WAIT_JIFFIES,
		      CHIP_ERASE_2MB_READY_WAIT_JIFFIES *
		      (unsigned long)(nor->mtd.size / SZ_2M));

	do {
		ret = spi_nor_lock_device(nor);
		if (ret)
			return ret;

		ret = spi_nor_write_enable(nor);
		if (ret) {
			spi_nor_unlock_device(nor);
			return ret;
		}

		ret = spi_nor_erase_die(nor, addr, die_size);

		spi_nor_unlock_device(nor);
		if (ret)
			return ret;

		ret = spi_nor_wait_till_ready_with_timeout(nor, timeout);
		if (ret)
			return ret;

		addr += die_size;
		len -= die_size;

	} while (len);

	return 0;
}

/*
 * Erase an address range on the nor chip. The address range may extend
 * one or more erase sectors. Return an error if there is a problem erasing.
 */
static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	u8 n_dice = nor->params->n_dice;
	bool multi_die_erase = false;
	u32 addr, len, rem;
	size_t die_size;
	int ret;

	dev_dbg(nor->dev, "at 0x%llx, len %lld\n", (long long)instr->addr,
		(long long)instr->len);

	/* Uniform-erase flashes require erasesize-aligned lengths. */
	if (spi_nor_has_uniform_erase(nor)) {
		div_u64_rem(instr->len, mtd->erasesize, &rem);
		if (rem)
			return -EINVAL;
	}

	addr = instr->addr;
	len = instr->len;

	if (n_dice) {
		die_size = div_u64(mtd->size, n_dice);
		/*
		 * Alignment test assumes die_size is a power of two —
		 * NOTE(review): confirm for multi-die parts.
		 */
		if (!(len & (die_size - 1)) && !(addr & (die_size - 1)))
			multi_die_erase = true;
	} else {
		die_size = mtd->size;
	}

	ret = spi_nor_prep_and_lock_pe(nor, instr->addr, instr->len);
	if (ret)
		return ret;

	/* chip (die) erase? */
	if ((len == mtd->size && !(nor->flags & SNOR_F_NO_OP_CHIP_ERASE)) ||
	    multi_die_erase) {
		ret = spi_nor_erase_dice(nor, addr, len, die_size);
		if (ret)
			goto erase_err;

	/* REVISIT in some cases we could speed up erasing large regions
	 * by using SPINOR_OP_SE instead of SPINOR_OP_BE_4K. We may have set up
	 * to use "small sector erase", but that's not always optimal.
	 */

	/* "sector"-at-a-time erase */
	} else if (spi_nor_has_uniform_erase(nor)) {
		while (len) {
			ret = spi_nor_lock_device(nor);
			if (ret)
				goto erase_err;

			ret = spi_nor_write_enable(nor);
			if (ret) {
				spi_nor_unlock_device(nor);
				goto erase_err;
			}

			ret = spi_nor_erase_sector(nor, addr);
			spi_nor_unlock_device(nor);
			if (ret)
				goto erase_err;

			ret = spi_nor_wait_till_ready(nor);
			if (ret)
				goto erase_err;

			addr += mtd->erasesize;
			len -= mtd->erasesize;
		}

	/* erase multiple sectors */
	} else {
		ret = spi_nor_erase_multi_sectors(nor, addr, len);
		if (ret)
			goto erase_err;
	}

	ret = spi_nor_write_disable(nor);

erase_err:
	spi_nor_unlock_and_unprep_pe(nor, instr->addr, instr->len);

	return ret;
}

/**
 * spi_nor_sr1_bit6_quad_enable() - Set the Quad Enable BIT(6) in the Status
 * Register 1.
 * @nor:	pointer to a 'struct spi_nor'
 *
 * Bit 6 of the Status Register 1 is the QE bit for Macronix like QSPI memories.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor)
{
	int ret;

	ret = spi_nor_read_sr(nor, nor->bouncebuf);
	if (ret)
		return ret;

	/* Quad Enable already set, nothing to do. */
	if (nor->bouncebuf[0] & SR1_QUAD_EN_BIT6)
		return 0;

	nor->bouncebuf[0] |= SR1_QUAD_EN_BIT6;

	return spi_nor_write_sr1_and_check(nor, nor->bouncebuf[0]);
}

/**
 * spi_nor_sr2_bit1_quad_enable() - set the Quad Enable BIT(1) in the Status
 * Register 2.
 * @nor:	pointer to a 'struct spi_nor'.
 *
 * Bit 1 of the Status Register 2 is the QE bit for Spansion like QSPI memories.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor)
{
	int ret;

	/* CR cannot be read back: write the QE bit blindly. */
	if (nor->flags & SNOR_F_NO_READ_CR)
		return spi_nor_write_16bit_cr_and_check(nor, SR2_QUAD_EN_BIT1);

	ret = spi_nor_read_cr(nor, nor->bouncebuf);
	if (ret)
		return ret;

	if (nor->bouncebuf[0] & SR2_QUAD_EN_BIT1)
		return 0;

	nor->bouncebuf[0] |= SR2_QUAD_EN_BIT1;

	return spi_nor_write_16bit_cr_and_check(nor, nor->bouncebuf[0]);
}

/**
 * spi_nor_sr2_bit7_quad_enable() - set QE bit in Status Register 2.
 * @nor:	pointer to a 'struct spi_nor'
 *
 * Set the Quad Enable (QE) bit in the Status Register 2.
 *
 * This is one of the procedures to set the QE bit described in the SFDP
 * (JESD216 rev B) specification but no manufacturer using this procedure has
 * been identified yet, hence the name of the function.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor)
{
	u8 *sr2 = nor->bouncebuf;
	int ret;
	u8 sr2_written;

	/* Check current Quad Enable bit value. */
	ret = spi_nor_read_sr2(nor, sr2);
	if (ret)
		return ret;
	if (*sr2 & SR2_QUAD_EN_BIT7)
		return 0;

	/* Update the Quad Enable bit. */
	*sr2 |= SR2_QUAD_EN_BIT7;

	ret = spi_nor_write_sr2(nor, sr2);
	if (ret)
		return ret;

	sr2_written = *sr2;

	/* Read back and check it. */
	ret = spi_nor_read_sr2(nor, sr2);
	if (ret)
		return ret;

	if (*sr2 != sr2_written) {
		dev_dbg(nor->dev, "SR2: Read back test failed\n");
		return -EIO;
	}

	return 0;
}

static const struct spi_nor_manufacturer *manufacturers[] = {
	&spi_nor_atmel,
	&spi_nor_eon,
	&spi_nor_esmt,
	&spi_nor_everspin,
	&spi_nor_gigadevice,
	&spi_nor_intel,
	&spi_nor_issi,
	&spi_nor_macronix,
	&spi_nor_micron,
	&spi_nor_st,
	&spi_nor_spansion,
	&spi_nor_sst,
	&spi_nor_winbond,
	&spi_nor_xilinx,
	&spi_nor_xmc,
};

/* Used when the JEDEC ID is unknown but valid SFDP tables are present. */
static const struct flash_info spi_nor_generic_flash = {
	.name = "spi-nor-generic",
};

/*
 * Scan all manufacturers' part tables for a flash whose ID prefix matches
 * @id. Sets nor->manufacturer on success, returns NULL when no part matches.
 */
static const struct flash_info *spi_nor_match_id(struct spi_nor *nor,
						 const u8 *id)
{
	const struct flash_info *part;
	unsigned int i, j;

	for (i = 0; i < ARRAY_SIZE(manufacturers); i++) {
		for (j = 0; j < manufacturers[i]->nparts; j++) {
			part = &manufacturers[i]->parts[j];
			if (part->id &&
			    !memcmp(part->id->bytes, id, part->id->len)) {
				nor->manufacturer = manufacturers[i];
				return part;
			}
		}
	}

	return NULL;
}

/* Read the JEDEC ID and resolve it to a flash_info descriptor. */
static const struct flash_info *spi_nor_detect(struct spi_nor *nor)
{
	const struct flash_info *info;
	u8 *id = nor->bouncebuf;
	int ret;

	ret = spi_nor_read_id(nor, 0, 0, id, nor->reg_proto);
	if (ret) {
		dev_dbg(nor->dev, "error %d reading JEDEC ID\n", ret);
		return ERR_PTR(ret);
	}

	/* Cache the complete flash ID. */
	nor->id = devm_kmemdup(nor->dev, id, SPI_NOR_MAX_ID_LEN, GFP_KERNEL);
	if (!nor->id)
		return ERR_PTR(-ENOMEM);

	info = spi_nor_match_id(nor, id);

	/* Fallback to a generic flash described only by its SFDP data. */
	if (!info) {
		ret = spi_nor_check_sfdp_signature(nor);
		if (!ret)
			info = &spi_nor_generic_flash;
	}

	if (!info) {
		dev_err(nor->dev, "unrecognized JEDEC id bytes: %*ph\n",
			SPI_NOR_MAX_ID_LEN, id);
		return ERR_PTR(-ENODEV);
	}
	return info;
}

static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
			size_t *retlen, u_char *buf)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	/* Keep the original range for the matching unlock call. */
	loff_t from_lock = from;
	size_t len_lock = len;
	ssize_t ret;

	dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len);

	ret = spi_nor_prep_and_lock_rd(nor, from_lock, len_lock);
	if (ret)
		return ret;

	/* spi_nor_read_data() may return short reads; loop until done. */
	while (len) {
		loff_t addr = from;

		addr = spi_nor_convert_addr(nor, addr);

		ret = spi_nor_read_data(nor, addr, len, buf);
		if (ret == 0) {
			/* We shouldn't see 0-length reads */
			ret = -EIO;
			goto read_err;
		}
		if (ret < 0)
			goto read_err;

		WARN_ON(ret > len);
		*retlen += ret;
		buf += ret;
		from += ret;
		len -= ret;
	}
	ret = 0;

read_err:
	spi_nor_unlock_and_unprep_rd(nor, from_lock, len_lock);

	return ret;
}

/*
 * Write an address range to the nor chip. Data must be written in
 * FLASH_PAGESIZE chunks. The address range may be any size provided
 * it is within the physical boundaries.
 */
static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
			 size_t *retlen, const u_char *buf)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	size_t page_offset, page_remain, i;
	ssize_t ret;
	u32 page_size = nor->params->page_size;

	dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);

	ret = spi_nor_prep_and_lock_pe(nor, to, len);
	if (ret)
		return ret;

	/* Issue one page program per iteration, never crossing a page. */
	for (i = 0; i < len; ) {
		ssize_t written;
		loff_t addr = to + i;

		/*
		 * If page_size is a power of two, the offset can be quickly
		 * calculated with an AND operation. On the other cases we
		 * need to do a modulus operation (more expensive).
		 */
		if (is_power_of_2(page_size)) {
			page_offset = addr & (page_size - 1);
		} else {
			u64 aux = addr;

			page_offset = do_div(aux, page_size);
		}
		/* the size of data remaining on the first page */
		page_remain = min_t(size_t, page_size - page_offset, len - i);

		addr = spi_nor_convert_addr(nor, addr);

		ret = spi_nor_lock_device(nor);
		if (ret)
			goto write_err;

		ret = spi_nor_write_enable(nor);
		if (ret) {
			spi_nor_unlock_device(nor);
			goto write_err;
		}

		ret = spi_nor_write_data(nor, addr, page_remain, buf + i);
		spi_nor_unlock_device(nor);
		if (ret < 0)
			goto write_err;
		written = ret;

		ret = spi_nor_wait_till_ready(nor);
		if (ret)
			goto write_err;
		*retlen += written;
		i += written;
	}

write_err:
	spi_nor_unlock_and_unprep_pe(nor, to, len);

	return ret;
}

/* Sanity-check that the caller filled the mandatory spi_nor fields. */
static int spi_nor_check(struct spi_nor *nor)
{
	if (!nor->dev ||
	    (!nor->spimem && !nor->controller_ops) ||
	    (!nor->spimem && nor->controller_ops &&
	     (!nor->controller_ops->read ||
	      !nor->controller_ops->write ||
	      !nor->controller_ops->read_reg ||
	      !nor->controller_ops->write_reg))) {
		pr_err("spi-nor: please fill all the necessary fields!\n");
		return -EINVAL;
	}

	if (nor->spimem && nor->controller_ops) {
		dev_err(nor->dev, "nor->spimem and nor->controller_ops are mutually exclusive, please set just one of them.\n");
		return -EINVAL;
	}

	return 0;
}

/* Populate a read command descriptor with the given timing and protocol. */
void
spi_nor_set_read_settings(struct spi_nor_read_command *read,
			  u8 num_mode_clocks,
			  u8 num_wait_states,
			  u8 opcode,
			  enum spi_nor_protocol proto)
{
	read->num_mode_clocks = num_mode_clocks;
	read->num_wait_states = num_wait_states;
	read->opcode = opcode;
	read->proto = proto;
}

/* Populate a page program command descriptor. */
void spi_nor_set_pp_settings(struct spi_nor_pp_command *pp, u8 opcode,
			     enum spi_nor_protocol proto)
{
	pp->opcode = opcode;
	pp->proto = proto;
}

/*
 * Map a single SNOR_HWCAPS_* bit to its command index via @table;
 * -EINVAL when the capability has no entry.
 */
static int spi_nor_hwcaps2cmd(u32 hwcaps, const int table[][2], size_t size)
{
	size_t i;

	for (i = 0; i < size; i++)
		if (table[i][0] == (int)hwcaps)
			return table[i][1];

	return -EINVAL;
}

int spi_nor_hwcaps_read2cmd(u32 hwcaps)
{
	static const int hwcaps_read2cmd[][2] = {
		{ SNOR_HWCAPS_READ,		SNOR_CMD_READ },
		{ SNOR_HWCAPS_READ_FAST,	SNOR_CMD_READ_FAST },
		{ SNOR_HWCAPS_READ_1_1_1_DTR,	SNOR_CMD_READ_1_1_1_DTR },
		{ SNOR_HWCAPS_READ_1_1_2,	SNOR_CMD_READ_1_1_2 },
		{ SNOR_HWCAPS_READ_1_2_2,	SNOR_CMD_READ_1_2_2 },
		{ SNOR_HWCAPS_READ_2_2_2,	SNOR_CMD_READ_2_2_2 },
		{ SNOR_HWCAPS_READ_1_2_2_DTR,	SNOR_CMD_READ_1_2_2_DTR },
		{ SNOR_HWCAPS_READ_1_1_4,	SNOR_CMD_READ_1_1_4 },
		{ SNOR_HWCAPS_READ_1_4_4,	SNOR_CMD_READ_1_4_4 },
		{ SNOR_HWCAPS_READ_4_4_4,	SNOR_CMD_READ_4_4_4 },
		{ SNOR_HWCAPS_READ_1_4_4_DTR,	SNOR_CMD_READ_1_4_4_DTR },
		{ SNOR_HWCAPS_READ_1_1_8,	SNOR_CMD_READ_1_1_8 },
		{ SNOR_HWCAPS_READ_1_8_8,	SNOR_CMD_READ_1_8_8 },
		{ SNOR_HWCAPS_READ_8_8_8,	SNOR_CMD_READ_8_8_8 },
		{ SNOR_HWCAPS_READ_1_8_8_DTR,	SNOR_CMD_READ_1_8_8_DTR },
		{ SNOR_HWCAPS_READ_8_8_8_DTR,	SNOR_CMD_READ_8_8_8_DTR },
	};

	return spi_nor_hwcaps2cmd(hwcaps, hwcaps_read2cmd,
				  ARRAY_SIZE(hwcaps_read2cmd));
}

int spi_nor_hwcaps_pp2cmd(u32 hwcaps)
{
	static const int hwcaps_pp2cmd[][2] = {
		{ SNOR_HWCAPS_PP,		SNOR_CMD_PP },
		{ SNOR_HWCAPS_PP_1_1_4,		SNOR_CMD_PP_1_1_4 },
		{ SNOR_HWCAPS_PP_1_4_4,		SNOR_CMD_PP_1_4_4 },
		{ SNOR_HWCAPS_PP_4_4_4,		SNOR_CMD_PP_4_4_4 },
		{ SNOR_HWCAPS_PP_1_1_8,		SNOR_CMD_PP_1_1_8 },
		{ SNOR_HWCAPS_PP_1_8_8,		SNOR_CMD_PP_1_8_8 },
		{ SNOR_HWCAPS_PP_8_8_8,		SNOR_CMD_PP_8_8_8 },
		{ SNOR_HWCAPS_PP_8_8_8_DTR,	SNOR_CMD_PP_8_8_8_DTR },
	};

	return spi_nor_hwcaps2cmd(hwcaps, hwcaps_pp2cmd,
				  ARRAY_SIZE(hwcaps_pp2cmd));
}

/**
 * spi_nor_spimem_check_op - check if the operation is supported
 *			     by controller
 * @nor:	pointer to a 'struct spi_nor'
 * @op:		pointer to op template to be checked
 *
 * Returns 0 if operation is supported, -EOPNOTSUPP otherwise.
 */
static int spi_nor_spimem_check_op(struct spi_nor *nor,
				   struct spi_mem_op *op)
{
	/*
	 * First test with 4 address bytes. The opcode itself might
	 * be a 3B addressing opcode but we don't care, because
	 * SPI controller implementation should not check the opcode,
	 * but just the sequence.
	 */
	op->addr.nbytes = 4;
	if (!spi_mem_supports_op(nor->spimem, op)) {
		if (nor->params->size > SZ_16M)
			return -EOPNOTSUPP;

		/* If flash size <= 16MB, 3 address bytes are sufficient */
		op->addr.nbytes = 3;
		if (!spi_mem_supports_op(nor->spimem, op))
			return -EOPNOTSUPP;
	}

	return 0;
}

/**
 * spi_nor_spimem_check_readop - check if the read op is supported
 *				 by controller
 * @nor:	pointer to a 'struct spi_nor'
 * @read:	pointer to op template to be checked
 *
 * Returns 0 if operation is supported, -EOPNOTSUPP otherwise.
 */
static int spi_nor_spimem_check_readop(struct spi_nor *nor,
				       const struct spi_nor_read_command *read)
{
	struct spi_mem_op op = SPI_NOR_READ_OP(read->opcode);

	spi_nor_spimem_setup_op(nor, &op, read->proto);

	/* convert the dummy cycles to the number of bytes */
	op.dummy.nbytes = (read->num_mode_clocks + read->num_wait_states) *
			  op.dummy.buswidth / 8;
	/* DTR transfers clock two bytes of dummy per cycle. */
	if (spi_nor_protocol_is_dtr(nor->read_proto))
		op.dummy.nbytes *= 2;

	return spi_nor_spimem_check_op(nor, &op);
}

/**
 * spi_nor_spimem_check_pp - check if the page program op is supported
 *			     by controller
 * @nor:	pointer to a 'struct spi_nor'
 * @pp:		pointer to op template to be checked
 *
 * Returns 0 if operation is supported, -EOPNOTSUPP otherwise.
2381 */ 2382 static int spi_nor_spimem_check_pp(struct spi_nor *nor, 2383 const struct spi_nor_pp_command *pp) 2384 { 2385 struct spi_mem_op op = SPI_NOR_PP_OP(pp->opcode); 2386 2387 spi_nor_spimem_setup_op(nor, &op, pp->proto); 2388 2389 return spi_nor_spimem_check_op(nor, &op); 2390 } 2391 2392 /** 2393 * spi_nor_spimem_adjust_hwcaps - Find optimal Read/Write protocol 2394 * based on SPI controller capabilities 2395 * @nor: pointer to a 'struct spi_nor' 2396 * @hwcaps: pointer to resulting capabilities after adjusting 2397 * according to controller and flash's capability 2398 */ 2399 static void 2400 spi_nor_spimem_adjust_hwcaps(struct spi_nor *nor, u32 *hwcaps) 2401 { 2402 struct spi_nor_flash_parameter *params = nor->params; 2403 unsigned int cap; 2404 2405 /* X-X-X modes are not supported yet, mask them all. */ 2406 *hwcaps &= ~SNOR_HWCAPS_X_X_X; 2407 2408 /* 2409 * If the reset line is broken, we do not want to enter a stateful 2410 * mode. 2411 */ 2412 if (nor->flags & SNOR_F_BROKEN_RESET) 2413 *hwcaps &= ~(SNOR_HWCAPS_X_X_X | SNOR_HWCAPS_X_X_X_DTR); 2414 2415 for (cap = 0; cap < sizeof(*hwcaps) * BITS_PER_BYTE; cap++) { 2416 int rdidx, ppidx; 2417 2418 if (!(*hwcaps & BIT(cap))) 2419 continue; 2420 2421 rdidx = spi_nor_hwcaps_read2cmd(BIT(cap)); 2422 if (rdidx >= 0 && 2423 spi_nor_spimem_check_readop(nor, ¶ms->reads[rdidx])) 2424 *hwcaps &= ~BIT(cap); 2425 2426 ppidx = spi_nor_hwcaps_pp2cmd(BIT(cap)); 2427 if (ppidx < 0) 2428 continue; 2429 2430 if (spi_nor_spimem_check_pp(nor, 2431 ¶ms->page_programs[ppidx])) 2432 *hwcaps &= ~BIT(cap); 2433 } 2434 } 2435 2436 /** 2437 * spi_nor_set_erase_type() - set a SPI NOR erase type 2438 * @erase: pointer to a structure that describes a SPI NOR erase type 2439 * @size: the size of the sector/block erased by the erase type 2440 * @opcode: the SPI command op code to erase the sector/block 2441 */ 2442 void spi_nor_set_erase_type(struct spi_nor_erase_type *erase, u32 size, 2443 u8 opcode) 2444 { 2445 erase->size = size; 
2446 erase->opcode = opcode; 2447 /* JEDEC JESD216B Standard imposes erase sizes to be power of 2. */ 2448 erase->size_shift = ffs(erase->size) - 1; 2449 erase->size_mask = (1 << erase->size_shift) - 1; 2450 } 2451 2452 /** 2453 * spi_nor_mask_erase_type() - mask out a SPI NOR erase type 2454 * @erase: pointer to a structure that describes a SPI NOR erase type 2455 */ 2456 void spi_nor_mask_erase_type(struct spi_nor_erase_type *erase) 2457 { 2458 erase->size = 0; 2459 } 2460 2461 /** 2462 * spi_nor_init_uniform_erase_map() - Initialize uniform erase map 2463 * @map: the erase map of the SPI NOR 2464 * @erase_mask: bitmask encoding erase types that can erase the entire 2465 * flash memory 2466 * @flash_size: the spi nor flash memory size 2467 */ 2468 void spi_nor_init_uniform_erase_map(struct spi_nor_erase_map *map, 2469 u8 erase_mask, u64 flash_size) 2470 { 2471 /* Offset 0 with erase_mask and SNOR_LAST_REGION bit set */ 2472 map->uniform_region.offset = (erase_mask & SNOR_ERASE_TYPE_MASK) | 2473 SNOR_LAST_REGION; 2474 map->uniform_region.size = flash_size; 2475 map->regions = &map->uniform_region; 2476 map->uniform_erase_type = erase_mask; 2477 } 2478 2479 int spi_nor_post_bfpt_fixups(struct spi_nor *nor, 2480 const struct sfdp_parameter_header *bfpt_header, 2481 const struct sfdp_bfpt *bfpt) 2482 { 2483 int ret; 2484 2485 if (nor->manufacturer && nor->manufacturer->fixups && 2486 nor->manufacturer->fixups->post_bfpt) { 2487 ret = nor->manufacturer->fixups->post_bfpt(nor, bfpt_header, 2488 bfpt); 2489 if (ret) 2490 return ret; 2491 } 2492 2493 if (nor->info->fixups && nor->info->fixups->post_bfpt) 2494 return nor->info->fixups->post_bfpt(nor, bfpt_header, bfpt); 2495 2496 return 0; 2497 } 2498 2499 static int spi_nor_select_read(struct spi_nor *nor, 2500 u32 shared_hwcaps) 2501 { 2502 int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_READ_MASK) - 1; 2503 const struct spi_nor_read_command *read; 2504 2505 if (best_match < 0) 2506 return -EINVAL; 2507 2508 cmd = 
spi_nor_hwcaps_read2cmd(BIT(best_match)); 2509 if (cmd < 0) 2510 return -EINVAL; 2511 2512 read = &nor->params->reads[cmd]; 2513 nor->read_opcode = read->opcode; 2514 nor->read_proto = read->proto; 2515 2516 /* 2517 * In the SPI NOR framework, we don't need to make the difference 2518 * between mode clock cycles and wait state clock cycles. 2519 * Indeed, the value of the mode clock cycles is used by a QSPI 2520 * flash memory to know whether it should enter or leave its 0-4-4 2521 * (Continuous Read / XIP) mode. 2522 * eXecution In Place is out of the scope of the mtd sub-system. 2523 * Hence we choose to merge both mode and wait state clock cycles 2524 * into the so called dummy clock cycles. 2525 */ 2526 nor->read_dummy = read->num_mode_clocks + read->num_wait_states; 2527 return 0; 2528 } 2529 2530 static int spi_nor_select_pp(struct spi_nor *nor, 2531 u32 shared_hwcaps) 2532 { 2533 int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_PP_MASK) - 1; 2534 const struct spi_nor_pp_command *pp; 2535 2536 if (best_match < 0) 2537 return -EINVAL; 2538 2539 cmd = spi_nor_hwcaps_pp2cmd(BIT(best_match)); 2540 if (cmd < 0) 2541 return -EINVAL; 2542 2543 pp = &nor->params->page_programs[cmd]; 2544 nor->program_opcode = pp->opcode; 2545 nor->write_proto = pp->proto; 2546 return 0; 2547 } 2548 2549 /** 2550 * spi_nor_select_uniform_erase() - select optimum uniform erase type 2551 * @map: the erase map of the SPI NOR 2552 * 2553 * Once the optimum uniform sector erase command is found, disable all the 2554 * other. 2555 * 2556 * Return: pointer to erase type on success, NULL otherwise. 2557 */ 2558 static const struct spi_nor_erase_type * 2559 spi_nor_select_uniform_erase(struct spi_nor_erase_map *map) 2560 { 2561 const struct spi_nor_erase_type *tested_erase, *erase = NULL; 2562 int i; 2563 u8 uniform_erase_type = map->uniform_erase_type; 2564 2565 /* 2566 * Search for the biggest erase size, except for when compiled 2567 * to use 4k erases. 
2568 */ 2569 for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) { 2570 if (!(uniform_erase_type & BIT(i))) 2571 continue; 2572 2573 tested_erase = &map->erase_type[i]; 2574 2575 /* Skip masked erase types. */ 2576 if (!tested_erase->size) 2577 continue; 2578 2579 /* 2580 * If the current erase size is the 4k one, stop here, 2581 * we have found the right uniform Sector Erase command. 2582 */ 2583 if (IS_ENABLED(CONFIG_MTD_SPI_NOR_USE_4K_SECTORS) && 2584 tested_erase->size == SZ_4K) { 2585 erase = tested_erase; 2586 break; 2587 } 2588 2589 /* 2590 * Otherwise, the current erase size is still a valid candidate. 2591 * Select the biggest valid candidate. 2592 */ 2593 if (!erase && tested_erase->size) 2594 erase = tested_erase; 2595 /* keep iterating to find the wanted_size */ 2596 } 2597 2598 if (!erase) 2599 return NULL; 2600 2601 /* Disable all other Sector Erase commands. */ 2602 map->uniform_erase_type &= ~SNOR_ERASE_TYPE_MASK; 2603 map->uniform_erase_type |= BIT(erase - map->erase_type); 2604 return erase; 2605 } 2606 2607 static int spi_nor_select_erase(struct spi_nor *nor) 2608 { 2609 struct spi_nor_erase_map *map = &nor->params->erase_map; 2610 const struct spi_nor_erase_type *erase = NULL; 2611 struct mtd_info *mtd = &nor->mtd; 2612 int i; 2613 2614 /* 2615 * The previous implementation handling Sector Erase commands assumed 2616 * that the SPI flash memory has an uniform layout then used only one 2617 * of the supported erase sizes for all Sector Erase commands. 2618 * So to be backward compatible, the new implementation also tries to 2619 * manage the SPI flash memory as uniform with a single erase sector 2620 * size, when possible. 
2621 */ 2622 if (spi_nor_has_uniform_erase(nor)) { 2623 erase = spi_nor_select_uniform_erase(map); 2624 if (!erase) 2625 return -EINVAL; 2626 nor->erase_opcode = erase->opcode; 2627 mtd->erasesize = erase->size; 2628 return 0; 2629 } 2630 2631 /* 2632 * For non-uniform SPI flash memory, set mtd->erasesize to the 2633 * maximum erase sector size. No need to set nor->erase_opcode. 2634 */ 2635 for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) { 2636 if (map->erase_type[i].size) { 2637 erase = &map->erase_type[i]; 2638 break; 2639 } 2640 } 2641 2642 if (!erase) 2643 return -EINVAL; 2644 2645 mtd->erasesize = erase->size; 2646 return 0; 2647 } 2648 2649 static int spi_nor_default_setup(struct spi_nor *nor, 2650 const struct spi_nor_hwcaps *hwcaps) 2651 { 2652 struct spi_nor_flash_parameter *params = nor->params; 2653 u32 ignored_mask, shared_mask; 2654 int err; 2655 2656 /* 2657 * Keep only the hardware capabilities supported by both the SPI 2658 * controller and the SPI flash memory. 2659 */ 2660 shared_mask = hwcaps->mask & params->hwcaps.mask; 2661 2662 if (nor->spimem) { 2663 /* 2664 * When called from spi_nor_probe(), all caps are set and we 2665 * need to discard some of them based on what the SPI 2666 * controller actually supports (using spi_mem_supports_op()). 2667 */ 2668 spi_nor_spimem_adjust_hwcaps(nor, &shared_mask); 2669 } else { 2670 /* 2671 * SPI n-n-n protocols are not supported when the SPI 2672 * controller directly implements the spi_nor interface. 2673 * Yet another reason to switch to spi-mem. 2674 */ 2675 ignored_mask = SNOR_HWCAPS_X_X_X | SNOR_HWCAPS_X_X_X_DTR; 2676 if (shared_mask & ignored_mask) { 2677 dev_dbg(nor->dev, 2678 "SPI n-n-n protocols are not supported.\n"); 2679 shared_mask &= ~ignored_mask; 2680 } 2681 } 2682 2683 /* Select the (Fast) Read command. 
*/ 2684 err = spi_nor_select_read(nor, shared_mask); 2685 if (err) { 2686 dev_dbg(nor->dev, 2687 "can't select read settings supported by both the SPI controller and memory.\n"); 2688 return err; 2689 } 2690 2691 /* Select the Page Program command. */ 2692 err = spi_nor_select_pp(nor, shared_mask); 2693 if (err) { 2694 dev_dbg(nor->dev, 2695 "can't select write settings supported by both the SPI controller and memory.\n"); 2696 return err; 2697 } 2698 2699 /* Select the Sector Erase command. */ 2700 err = spi_nor_select_erase(nor); 2701 if (err) { 2702 dev_dbg(nor->dev, 2703 "can't select erase settings supported by both the SPI controller and memory.\n"); 2704 return err; 2705 } 2706 2707 return 0; 2708 } 2709 2710 static int spi_nor_set_addr_nbytes(struct spi_nor *nor) 2711 { 2712 if (nor->params->addr_nbytes) { 2713 nor->addr_nbytes = nor->params->addr_nbytes; 2714 } else if (nor->read_proto == SNOR_PROTO_8_8_8_DTR) { 2715 /* 2716 * In 8D-8D-8D mode, one byte takes half a cycle to transfer. So 2717 * in this protocol an odd addr_nbytes cannot be used because 2718 * then the address phase would only span a cycle and a half. 2719 * Half a cycle would be left over. We would then have to start 2720 * the dummy phase in the middle of a cycle and so too the data 2721 * phase, and we will end the transaction with half a cycle left 2722 * over. 2723 * 2724 * Force all 8D-8D-8D flashes to use an addr_nbytes of 4 to 2725 * avoid this situation. 
2726 */ 2727 nor->addr_nbytes = 4; 2728 } else if (nor->info->addr_nbytes) { 2729 nor->addr_nbytes = nor->info->addr_nbytes; 2730 } else { 2731 nor->addr_nbytes = 3; 2732 } 2733 2734 if (nor->addr_nbytes == 3 && nor->params->size > 0x1000000) { 2735 /* enable 4-byte addressing if the device exceeds 16MiB */ 2736 nor->addr_nbytes = 4; 2737 } 2738 2739 if (nor->addr_nbytes > SPI_NOR_MAX_ADDR_NBYTES) { 2740 dev_dbg(nor->dev, "The number of address bytes is too large: %u\n", 2741 nor->addr_nbytes); 2742 return -EINVAL; 2743 } 2744 2745 /* Set 4byte opcodes when possible. */ 2746 if (nor->addr_nbytes == 4 && nor->flags & SNOR_F_4B_OPCODES && 2747 !(nor->flags & SNOR_F_HAS_4BAIT)) 2748 spi_nor_set_4byte_opcodes(nor); 2749 2750 return 0; 2751 } 2752 2753 static int spi_nor_setup(struct spi_nor *nor, 2754 const struct spi_nor_hwcaps *hwcaps) 2755 { 2756 int ret; 2757 2758 if (nor->params->setup) 2759 ret = nor->params->setup(nor, hwcaps); 2760 else 2761 ret = spi_nor_default_setup(nor, hwcaps); 2762 if (ret) 2763 return ret; 2764 2765 return spi_nor_set_addr_nbytes(nor); 2766 } 2767 2768 /** 2769 * spi_nor_manufacturer_init_params() - Initialize the flash's parameters and 2770 * settings based on MFR register and ->default_init() hook. 2771 * @nor: pointer to a 'struct spi_nor'. 2772 */ 2773 static void spi_nor_manufacturer_init_params(struct spi_nor *nor) 2774 { 2775 if (nor->manufacturer && nor->manufacturer->fixups && 2776 nor->manufacturer->fixups->default_init) 2777 nor->manufacturer->fixups->default_init(nor); 2778 2779 if (nor->info->fixups && nor->info->fixups->default_init) 2780 nor->info->fixups->default_init(nor); 2781 } 2782 2783 /** 2784 * spi_nor_no_sfdp_init_params() - Initialize the flash's parameters and 2785 * settings based on nor->info->sfdp_flags. This method should be called only by 2786 * flashes that do not define SFDP tables. 
If the flash supports SFDP but the 2787 * information is wrong and the settings from this function can not be retrieved 2788 * by parsing SFDP, one should instead use the fixup hooks and update the wrong 2789 * bits. 2790 * @nor: pointer to a 'struct spi_nor'. 2791 */ 2792 static void spi_nor_no_sfdp_init_params(struct spi_nor *nor) 2793 { 2794 struct spi_nor_flash_parameter *params = nor->params; 2795 struct spi_nor_erase_map *map = ¶ms->erase_map; 2796 const struct flash_info *info = nor->info; 2797 const u8 no_sfdp_flags = info->no_sfdp_flags; 2798 u8 i, erase_mask; 2799 2800 if (no_sfdp_flags & SPI_NOR_DUAL_READ) { 2801 params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2; 2802 spi_nor_set_read_settings(¶ms->reads[SNOR_CMD_READ_1_1_2], 2803 0, 8, SPINOR_OP_READ_1_1_2, 2804 SNOR_PROTO_1_1_2); 2805 } 2806 2807 if (no_sfdp_flags & SPI_NOR_QUAD_READ) { 2808 params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4; 2809 spi_nor_set_read_settings(¶ms->reads[SNOR_CMD_READ_1_1_4], 2810 0, 8, SPINOR_OP_READ_1_1_4, 2811 SNOR_PROTO_1_1_4); 2812 } 2813 2814 if (no_sfdp_flags & SPI_NOR_OCTAL_READ) { 2815 params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_8; 2816 spi_nor_set_read_settings(¶ms->reads[SNOR_CMD_READ_1_1_8], 2817 0, 8, SPINOR_OP_READ_1_1_8, 2818 SNOR_PROTO_1_1_8); 2819 } 2820 2821 if (no_sfdp_flags & SPI_NOR_OCTAL_DTR_READ) { 2822 params->hwcaps.mask |= SNOR_HWCAPS_READ_8_8_8_DTR; 2823 spi_nor_set_read_settings(¶ms->reads[SNOR_CMD_READ_8_8_8_DTR], 2824 0, 20, SPINOR_OP_READ_FAST, 2825 SNOR_PROTO_8_8_8_DTR); 2826 } 2827 2828 if (no_sfdp_flags & SPI_NOR_OCTAL_DTR_PP) { 2829 params->hwcaps.mask |= SNOR_HWCAPS_PP_8_8_8_DTR; 2830 /* 2831 * Since xSPI Page Program opcode is backward compatible with 2832 * Legacy SPI, use Legacy SPI opcode there as well. 2833 */ 2834 spi_nor_set_pp_settings(¶ms->page_programs[SNOR_CMD_PP_8_8_8_DTR], 2835 SPINOR_OP_PP, SNOR_PROTO_8_8_8_DTR); 2836 } 2837 2838 /* 2839 * Sector Erase settings. 
Sort Erase Types in ascending order, with the 2840 * smallest erase size starting at BIT(0). 2841 */ 2842 erase_mask = 0; 2843 i = 0; 2844 if (no_sfdp_flags & SECT_4K) { 2845 erase_mask |= BIT(i); 2846 spi_nor_set_erase_type(&map->erase_type[i], 4096u, 2847 SPINOR_OP_BE_4K); 2848 i++; 2849 } 2850 erase_mask |= BIT(i); 2851 spi_nor_set_erase_type(&map->erase_type[i], 2852 info->sector_size ?: SPI_NOR_DEFAULT_SECTOR_SIZE, 2853 SPINOR_OP_SE); 2854 spi_nor_init_uniform_erase_map(map, erase_mask, params->size); 2855 } 2856 2857 /** 2858 * spi_nor_init_flags() - Initialize NOR flags for settings that are not defined 2859 * in the JESD216 SFDP standard, thus can not be retrieved when parsing SFDP. 2860 * @nor: pointer to a 'struct spi_nor' 2861 */ 2862 static void spi_nor_init_flags(struct spi_nor *nor) 2863 { 2864 struct device_node *np = spi_nor_get_flash_node(nor); 2865 const u16 flags = nor->info->flags; 2866 2867 if (of_property_read_bool(np, "broken-flash-reset")) 2868 nor->flags |= SNOR_F_BROKEN_RESET; 2869 2870 if (of_property_read_bool(np, "no-wp")) 2871 nor->flags |= SNOR_F_NO_WP; 2872 2873 if (flags & SPI_NOR_SWP_IS_VOLATILE) 2874 nor->flags |= SNOR_F_SWP_IS_VOLATILE; 2875 2876 if (flags & SPI_NOR_HAS_LOCK) 2877 nor->flags |= SNOR_F_HAS_LOCK; 2878 2879 if (flags & SPI_NOR_HAS_TB) { 2880 nor->flags |= SNOR_F_HAS_SR_TB; 2881 if (flags & SPI_NOR_TB_SR_BIT6) 2882 nor->flags |= SNOR_F_HAS_SR_TB_BIT6; 2883 } 2884 2885 if (flags & SPI_NOR_4BIT_BP) { 2886 nor->flags |= SNOR_F_HAS_4BIT_BP; 2887 if (flags & SPI_NOR_BP3_SR_BIT6) 2888 nor->flags |= SNOR_F_HAS_SR_BP3_BIT6; 2889 } 2890 2891 if (flags & SPI_NOR_RWW && nor->params->n_banks > 1 && 2892 !nor->controller_ops) 2893 nor->flags |= SNOR_F_RWW; 2894 } 2895 2896 /** 2897 * spi_nor_init_fixup_flags() - Initialize NOR flags for settings that can not 2898 * be discovered by SFDP for this particular flash because the SFDP table that 2899 * indicates this support is not defined in the flash. 
In case the table for 2900 * this support is defined but has wrong values, one should instead use a 2901 * post_sfdp() hook to set the SNOR_F equivalent flag. 2902 * @nor: pointer to a 'struct spi_nor' 2903 */ 2904 static void spi_nor_init_fixup_flags(struct spi_nor *nor) 2905 { 2906 const u8 fixup_flags = nor->info->fixup_flags; 2907 2908 if (fixup_flags & SPI_NOR_4B_OPCODES) 2909 nor->flags |= SNOR_F_4B_OPCODES; 2910 2911 if (fixup_flags & SPI_NOR_IO_MODE_EN_VOLATILE) 2912 nor->flags |= SNOR_F_IO_MODE_EN_VOLATILE; 2913 } 2914 2915 /** 2916 * spi_nor_late_init_params() - Late initialization of default flash parameters. 2917 * @nor: pointer to a 'struct spi_nor' 2918 * 2919 * Used to initialize flash parameters that are not declared in the JESD216 2920 * SFDP standard, or where SFDP tables are not defined at all. 2921 * Will replace the spi_nor_manufacturer_init_params() method. 2922 */ 2923 static int spi_nor_late_init_params(struct spi_nor *nor) 2924 { 2925 struct spi_nor_flash_parameter *params = nor->params; 2926 int ret; 2927 2928 if (nor->manufacturer && nor->manufacturer->fixups && 2929 nor->manufacturer->fixups->late_init) { 2930 ret = nor->manufacturer->fixups->late_init(nor); 2931 if (ret) 2932 return ret; 2933 } 2934 2935 /* Needed by some flashes late_init hooks. */ 2936 spi_nor_init_flags(nor); 2937 2938 if (nor->info->fixups && nor->info->fixups->late_init) { 2939 ret = nor->info->fixups->late_init(nor); 2940 if (ret) 2941 return ret; 2942 } 2943 2944 if (!nor->params->die_erase_opcode) 2945 nor->params->die_erase_opcode = SPINOR_OP_CHIP_ERASE; 2946 2947 /* Default method kept for backward compatibility. */ 2948 if (!params->set_4byte_addr_mode) 2949 params->set_4byte_addr_mode = spi_nor_set_4byte_addr_mode_brwr; 2950 2951 spi_nor_init_fixup_flags(nor); 2952 2953 /* 2954 * NOR protection support. When locking_ops are not provided, we pick 2955 * the default ones. 
2956 */ 2957 if (nor->flags & SNOR_F_HAS_LOCK && !nor->params->locking_ops) 2958 spi_nor_init_default_locking_ops(nor); 2959 2960 if (params->n_banks > 1) 2961 params->bank_size = div64_u64(params->size, params->n_banks); 2962 2963 return 0; 2964 } 2965 2966 /** 2967 * spi_nor_sfdp_init_params_deprecated() - Deprecated way of initializing flash 2968 * parameters and settings based on JESD216 SFDP standard. 2969 * @nor: pointer to a 'struct spi_nor'. 2970 * 2971 * The method has a roll-back mechanism: in case the SFDP parsing fails, the 2972 * legacy flash parameters and settings will be restored. 2973 */ 2974 static void spi_nor_sfdp_init_params_deprecated(struct spi_nor *nor) 2975 { 2976 struct spi_nor_flash_parameter sfdp_params; 2977 2978 memcpy(&sfdp_params, nor->params, sizeof(sfdp_params)); 2979 2980 if (spi_nor_parse_sfdp(nor)) { 2981 memcpy(nor->params, &sfdp_params, sizeof(*nor->params)); 2982 nor->flags &= ~SNOR_F_4B_OPCODES; 2983 } 2984 } 2985 2986 /** 2987 * spi_nor_init_params_deprecated() - Deprecated way of initializing flash 2988 * parameters and settings. 2989 * @nor: pointer to a 'struct spi_nor'. 2990 * 2991 * The method assumes that flash doesn't support SFDP so it initializes flash 2992 * parameters in spi_nor_no_sfdp_init_params() which later on can be overwritten 2993 * when parsing SFDP, if supported. 2994 */ 2995 static void spi_nor_init_params_deprecated(struct spi_nor *nor) 2996 { 2997 spi_nor_no_sfdp_init_params(nor); 2998 2999 spi_nor_manufacturer_init_params(nor); 3000 3001 if (nor->info->no_sfdp_flags & (SPI_NOR_DUAL_READ | 3002 SPI_NOR_QUAD_READ | 3003 SPI_NOR_OCTAL_READ | 3004 SPI_NOR_OCTAL_DTR_READ)) 3005 spi_nor_sfdp_init_params_deprecated(nor); 3006 } 3007 3008 /** 3009 * spi_nor_init_default_params() - Default initialization of flash parameters 3010 * and settings. Done for all flashes, regardless is they define SFDP tables 3011 * or not. 3012 * @nor: pointer to a 'struct spi_nor'. 
3013 */ 3014 static void spi_nor_init_default_params(struct spi_nor *nor) 3015 { 3016 struct spi_nor_flash_parameter *params = nor->params; 3017 const struct flash_info *info = nor->info; 3018 struct device_node *np = spi_nor_get_flash_node(nor); 3019 3020 params->quad_enable = spi_nor_sr2_bit1_quad_enable; 3021 params->otp.org = info->otp; 3022 3023 /* Default to 16-bit Write Status (01h) Command */ 3024 nor->flags |= SNOR_F_HAS_16BIT_SR; 3025 3026 /* Set SPI NOR sizes. */ 3027 params->writesize = 1; 3028 params->size = info->size; 3029 params->bank_size = params->size; 3030 params->page_size = info->page_size ?: SPI_NOR_DEFAULT_PAGE_SIZE; 3031 params->n_banks = info->n_banks ?: SPI_NOR_DEFAULT_N_BANKS; 3032 3033 if (!(info->flags & SPI_NOR_NO_FR)) { 3034 /* Default to Fast Read for DT and non-DT platform devices. */ 3035 params->hwcaps.mask |= SNOR_HWCAPS_READ_FAST; 3036 3037 /* Mask out Fast Read if not requested at DT instantiation. */ 3038 if (np && !of_property_read_bool(np, "m25p,fast-read")) 3039 params->hwcaps.mask &= ~SNOR_HWCAPS_READ_FAST; 3040 } 3041 3042 /* (Fast) Read settings. */ 3043 params->hwcaps.mask |= SNOR_HWCAPS_READ; 3044 spi_nor_set_read_settings(¶ms->reads[SNOR_CMD_READ], 3045 0, 0, SPINOR_OP_READ, 3046 SNOR_PROTO_1_1_1); 3047 3048 if (params->hwcaps.mask & SNOR_HWCAPS_READ_FAST) 3049 spi_nor_set_read_settings(¶ms->reads[SNOR_CMD_READ_FAST], 3050 0, 8, SPINOR_OP_READ_FAST, 3051 SNOR_PROTO_1_1_1); 3052 /* Page Program settings. */ 3053 params->hwcaps.mask |= SNOR_HWCAPS_PP; 3054 spi_nor_set_pp_settings(¶ms->page_programs[SNOR_CMD_PP], 3055 SPINOR_OP_PP, SNOR_PROTO_1_1_1); 3056 3057 if (info->flags & SPI_NOR_QUAD_PP) { 3058 params->hwcaps.mask |= SNOR_HWCAPS_PP_1_1_4; 3059 spi_nor_set_pp_settings(¶ms->page_programs[SNOR_CMD_PP_1_1_4], 3060 SPINOR_OP_PP_1_1_4, SNOR_PROTO_1_1_4); 3061 } 3062 } 3063 3064 /** 3065 * spi_nor_init_params() - Initialize the flash's parameters and settings. 3066 * @nor: pointer to a 'struct spi_nor'. 
3067 * 3068 * The flash parameters and settings are initialized based on a sequence of 3069 * calls that are ordered by priority: 3070 * 3071 * 1/ Default flash parameters initialization. The initializations are done 3072 * based on nor->info data: 3073 * spi_nor_info_init_params() 3074 * 3075 * which can be overwritten by: 3076 * 2/ Manufacturer flash parameters initialization. The initializations are 3077 * done based on MFR register, or when the decisions can not be done solely 3078 * based on MFR, by using specific flash_info tweeks, ->default_init(): 3079 * spi_nor_manufacturer_init_params() 3080 * 3081 * which can be overwritten by: 3082 * 3/ SFDP flash parameters initialization. JESD216 SFDP is a standard and 3083 * should be more accurate that the above. 3084 * spi_nor_parse_sfdp() or spi_nor_no_sfdp_init_params() 3085 * 3086 * Please note that there is a ->post_bfpt() fixup hook that can overwrite 3087 * the flash parameters and settings immediately after parsing the Basic 3088 * Flash Parameter Table. 3089 * spi_nor_post_sfdp_fixups() is called after the SFDP tables are parsed. 3090 * It is used to tweak various flash parameters when information provided 3091 * by the SFDP tables are wrong. 3092 * 3093 * which can be overwritten by: 3094 * 4/ Late flash parameters initialization, used to initialize flash 3095 * parameters that are not declared in the JESD216 SFDP standard, or where SFDP 3096 * tables are not defined at all. 3097 * spi_nor_late_init_params() 3098 * 3099 * Return: 0 on success, -errno otherwise. 3100 */ 3101 static int spi_nor_init_params(struct spi_nor *nor) 3102 { 3103 int ret; 3104 3105 nor->params = devm_kzalloc(nor->dev, sizeof(*nor->params), GFP_KERNEL); 3106 if (!nor->params) 3107 return -ENOMEM; 3108 3109 spi_nor_init_default_params(nor); 3110 3111 if (spi_nor_needs_sfdp(nor)) { 3112 ret = spi_nor_parse_sfdp(nor); 3113 if (ret) { 3114 dev_err(nor->dev, "BFPT parsing failed. 
Please consider using SPI_NOR_SKIP_SFDP when declaring the flash\n"); 3115 return ret; 3116 } 3117 } else if (nor->info->no_sfdp_flags & SPI_NOR_SKIP_SFDP) { 3118 spi_nor_no_sfdp_init_params(nor); 3119 } else { 3120 spi_nor_init_params_deprecated(nor); 3121 } 3122 3123 return spi_nor_late_init_params(nor); 3124 } 3125 3126 /** spi_nor_set_octal_dtr() - enable or disable Octal DTR I/O. 3127 * @nor: pointer to a 'struct spi_nor' 3128 * @enable: whether to enable or disable Octal DTR 3129 * 3130 * Return: 0 on success, -errno otherwise. 3131 */ 3132 static int spi_nor_set_octal_dtr(struct spi_nor *nor, bool enable) 3133 { 3134 int ret; 3135 3136 if (!nor->params->set_octal_dtr) 3137 return 0; 3138 3139 if (!(nor->read_proto == SNOR_PROTO_8_8_8_DTR && 3140 nor->write_proto == SNOR_PROTO_8_8_8_DTR)) 3141 return 0; 3142 3143 if (!(nor->flags & SNOR_F_IO_MODE_EN_VOLATILE)) 3144 return 0; 3145 3146 ret = nor->params->set_octal_dtr(nor, enable); 3147 if (ret) 3148 return ret; 3149 3150 if (enable) 3151 nor->reg_proto = SNOR_PROTO_8_8_8_DTR; 3152 else 3153 nor->reg_proto = SNOR_PROTO_1_1_1; 3154 3155 return 0; 3156 } 3157 3158 /** 3159 * spi_nor_quad_enable() - enable Quad I/O if needed. 3160 * @nor: pointer to a 'struct spi_nor' 3161 * 3162 * Return: 0 on success, -errno otherwise. 3163 */ 3164 static int spi_nor_quad_enable(struct spi_nor *nor) 3165 { 3166 if (!nor->params->quad_enable) 3167 return 0; 3168 3169 if (!(spi_nor_get_protocol_width(nor->read_proto) == 4 || 3170 spi_nor_get_protocol_width(nor->write_proto) == 4)) 3171 return 0; 3172 3173 return nor->params->quad_enable(nor); 3174 } 3175 3176 /** 3177 * spi_nor_set_4byte_addr_mode() - Set address mode. 3178 * @nor: pointer to a 'struct spi_nor'. 3179 * @enable: enable/disable 4 byte address mode. 3180 * 3181 * Return: 0 on success, -errno otherwise. 
3182 */ 3183 int spi_nor_set_4byte_addr_mode(struct spi_nor *nor, bool enable) 3184 { 3185 struct spi_nor_flash_parameter *params = nor->params; 3186 int ret; 3187 3188 if (enable) { 3189 /* 3190 * If the RESET# pin isn't hooked up properly, or the system 3191 * otherwise doesn't perform a reset command in the boot 3192 * sequence, it's impossible to 100% protect against unexpected 3193 * reboots (e.g., crashes). Warn the user (or hopefully, system 3194 * designer) that this is bad. 3195 */ 3196 WARN_ONCE(nor->flags & SNOR_F_BROKEN_RESET, 3197 "enabling reset hack; may not recover from unexpected reboots\n"); 3198 } 3199 3200 ret = params->set_4byte_addr_mode(nor, enable); 3201 if (ret && ret != -EOPNOTSUPP) 3202 return ret; 3203 3204 if (enable) { 3205 params->addr_nbytes = 4; 3206 params->addr_mode_nbytes = 4; 3207 } else { 3208 params->addr_nbytes = 3; 3209 params->addr_mode_nbytes = 3; 3210 } 3211 3212 return 0; 3213 } 3214 3215 static int spi_nor_init(struct spi_nor *nor) 3216 { 3217 int err; 3218 3219 err = spi_nor_set_octal_dtr(nor, true); 3220 if (err) { 3221 dev_dbg(nor->dev, "octal mode not supported\n"); 3222 return err; 3223 } 3224 3225 err = spi_nor_quad_enable(nor); 3226 if (err) { 3227 dev_dbg(nor->dev, "quad mode not supported\n"); 3228 return err; 3229 } 3230 3231 /* 3232 * Some SPI NOR flashes are write protected by default after a power-on 3233 * reset cycle, in order to avoid inadvertent writes during power-up. 3234 * Backward compatibility imposes to unlock the entire flash memory 3235 * array at power-up by default. Depending on the kernel configuration 3236 * (1) do nothing, (2) always unlock the entire flash array or (3) 3237 * unlock the entire flash array only when the software write 3238 * protection bits are volatile. The latter is indicated by 3239 * SNOR_F_SWP_IS_VOLATILE. 
3240 */ 3241 if (IS_ENABLED(CONFIG_MTD_SPI_NOR_SWP_DISABLE) || 3242 (IS_ENABLED(CONFIG_MTD_SPI_NOR_SWP_DISABLE_ON_VOLATILE) && 3243 nor->flags & SNOR_F_SWP_IS_VOLATILE)) 3244 spi_nor_try_unlock_all(nor); 3245 3246 if (nor->addr_nbytes == 4 && 3247 nor->read_proto != SNOR_PROTO_8_8_8_DTR && 3248 !(nor->flags & SNOR_F_4B_OPCODES)) 3249 return spi_nor_set_4byte_addr_mode(nor, true); 3250 3251 return 0; 3252 } 3253 3254 /** 3255 * spi_nor_soft_reset() - Perform a software reset 3256 * @nor: pointer to 'struct spi_nor' 3257 * 3258 * Performs a "Soft Reset and Enter Default Protocol Mode" sequence which resets 3259 * the device to its power-on-reset state. This is useful when the software has 3260 * made some changes to device (volatile) registers and needs to reset it before 3261 * shutting down, for example. 3262 * 3263 * Not every flash supports this sequence. The same set of opcodes might be used 3264 * for some other operation on a flash that does not support this. Support for 3265 * this sequence can be discovered via SFDP in the BFPT table. 3266 * 3267 * Return: 0 on success, -errno otherwise. 3268 */ 3269 static void spi_nor_soft_reset(struct spi_nor *nor) 3270 { 3271 struct spi_mem_op op; 3272 int ret; 3273 3274 op = (struct spi_mem_op)SPINOR_SRSTEN_OP; 3275 3276 spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); 3277 3278 ret = spi_mem_exec_op(nor->spimem, &op); 3279 if (ret) { 3280 if (ret != -EOPNOTSUPP) 3281 dev_warn(nor->dev, "Software reset failed: %d\n", ret); 3282 return; 3283 } 3284 3285 op = (struct spi_mem_op)SPINOR_SRST_OP; 3286 3287 spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); 3288 3289 ret = spi_mem_exec_op(nor->spimem, &op); 3290 if (ret) { 3291 dev_warn(nor->dev, "Software reset failed: %d\n", ret); 3292 return; 3293 } 3294 3295 /* 3296 * Software Reset is not instant, and the delay varies from flash to 3297 * flash. Looking at a few flashes, most range somewhere below 100 3298 * microseconds. So, sleep for a range of 200-400 us. 
3299 */ 3300 usleep_range(SPI_NOR_SRST_SLEEP_MIN, SPI_NOR_SRST_SLEEP_MAX); 3301 } 3302 3303 /* mtd suspend handler */ 3304 static int spi_nor_suspend(struct mtd_info *mtd) 3305 { 3306 struct spi_nor *nor = mtd_to_spi_nor(mtd); 3307 int ret; 3308 3309 /* Disable octal DTR mode if we enabled it. */ 3310 ret = spi_nor_set_octal_dtr(nor, false); 3311 if (ret) 3312 dev_err(nor->dev, "suspend() failed\n"); 3313 3314 return ret; 3315 } 3316 3317 /* mtd resume handler */ 3318 static void spi_nor_resume(struct mtd_info *mtd) 3319 { 3320 struct spi_nor *nor = mtd_to_spi_nor(mtd); 3321 struct device *dev = nor->dev; 3322 int ret; 3323 3324 /* re-initialize the nor chip */ 3325 ret = spi_nor_init(nor); 3326 if (ret) 3327 dev_err(dev, "resume() failed\n"); 3328 } 3329 3330 static int spi_nor_get_device(struct mtd_info *mtd) 3331 { 3332 struct mtd_info *master = mtd_get_master(mtd); 3333 struct spi_nor *nor = mtd_to_spi_nor(master); 3334 struct device *dev; 3335 3336 if (nor->spimem) 3337 dev = nor->spimem->spi->controller->dev.parent; 3338 else 3339 dev = nor->dev; 3340 3341 if (!try_module_get(dev->driver->owner)) 3342 return -ENODEV; 3343 3344 return 0; 3345 } 3346 3347 static void spi_nor_put_device(struct mtd_info *mtd) 3348 { 3349 struct mtd_info *master = mtd_get_master(mtd); 3350 struct spi_nor *nor = mtd_to_spi_nor(master); 3351 struct device *dev; 3352 3353 if (nor->spimem) 3354 dev = nor->spimem->spi->controller->dev.parent; 3355 else 3356 dev = nor->dev; 3357 3358 module_put(dev->driver->owner); 3359 } 3360 3361 static void spi_nor_restore(struct spi_nor *nor) 3362 { 3363 int ret; 3364 3365 /* restore the addressing mode */ 3366 if (nor->addr_nbytes == 4 && !(nor->flags & SNOR_F_4B_OPCODES) && 3367 nor->flags & SNOR_F_BROKEN_RESET) { 3368 ret = spi_nor_set_4byte_addr_mode(nor, false); 3369 if (ret) 3370 /* 3371 * Do not stop the execution in the hope that the flash 3372 * will default to the 3-byte address mode after the 3373 * software reset. 
			 */
			dev_err(nor->dev, "Failed to exit 4-byte address mode, err = %d\n", ret);
	}

	if (nor->flags & SNOR_F_SOFT_RESET)
		spi_nor_soft_reset(nor);
}

/*
 * Look up a flash_info entry by exact part name across all manufacturers.
 * On a match, also records the manufacturer in nor->manufacturer.
 * Returns NULL when no part of that name is known.
 */
static const struct flash_info *spi_nor_match_name(struct spi_nor *nor,
						   const char *name)
{
	unsigned int i, j;

	for (i = 0; i < ARRAY_SIZE(manufacturers); i++) {
		for (j = 0; j < manufacturers[i]->nparts; j++) {
			if (!strcmp(name, manufacturers[i]->parts[j].name)) {
				nor->manufacturer = manufacturers[i];
				return &manufacturers[i]->parts[j];
			}
		}
	}

	return NULL;
}

/*
 * Resolve the flash_info for this device: try the caller-provided name first,
 * fall back to JEDEC auto-detection, and cross-check the two when both are
 * available. Returns a flash_info pointer or an ERR_PTR() from detection.
 */
static const struct flash_info *spi_nor_get_flash_info(struct spi_nor *nor,
						       const char *name)
{
	const struct flash_info *info = NULL;

	if (name)
		info = spi_nor_match_name(nor, name);
	/* Try to auto-detect if chip name wasn't specified or not found */
	if (!info)
		return spi_nor_detect(nor);

	/*
	 * If caller has specified name of flash model that can normally be
	 * detected using JEDEC, let's verify it.
	 */
	if (name && info->id) {
		const struct flash_info *jinfo;

		jinfo = spi_nor_detect(nor);
		if (IS_ERR(jinfo)) {
			return jinfo;
		} else if (jinfo != info) {
			/*
			 * JEDEC knows better, so overwrite platform ID. We
			 * can't trust partitions any longer, but we'll let
			 * mtd apply them anyway, since some partitions may be
			 * marked read-only, and we don't want to lose that
			 * information, even if it's not 100% accurate.
			 */
			dev_warn(nor->dev, "found %s, expected %s\n",
				 jinfo->name, info->name);
			info = jinfo;
		}
	}

	return info;
}

/* Fill in the mtd_info fields and callbacks backing this SPI NOR flash. */
static void spi_nor_set_mtd_info(struct spi_nor *nor)
{
	struct mtd_info *mtd = &nor->mtd;
	struct device *dev = nor->dev;

	spi_nor_set_mtd_locking_ops(nor);
	spi_nor_set_mtd_otp_ops(nor);

	mtd->dev.parent = dev;
	if (!mtd->name)
		mtd->name = dev_name(dev);
	mtd->type = MTD_NORFLASH;
	mtd->flags = MTD_CAP_NORFLASH;
	/* Unset BIT_WRITEABLE to enable JFFS2 write buffer for ECC'd NOR */
	if (nor->flags & SNOR_F_ECC)
		mtd->flags &= ~MTD_BIT_WRITEABLE;
	if (nor->info->flags & SPI_NOR_NO_ERASE)
		mtd->flags |= MTD_NO_ERASE;
	else
		mtd->_erase = spi_nor_erase;
	mtd->writesize = nor->params->writesize;
	mtd->writebufsize = nor->params->page_size;
	mtd->size = nor->params->size;
	mtd->_read = spi_nor_read;
	/* Might be already set by some SST flashes. */
	if (!mtd->_write)
		mtd->_write = spi_nor_write;
	mtd->_suspend = spi_nor_suspend;
	mtd->_resume = spi_nor_resume;
	mtd->_get_device = spi_nor_get_device;
	mtd->_put_device = spi_nor_put_device;
}

/*
 * Pulse the optional "reset" GPIO to hardware-reset the flash. Returns 0
 * when the GPIO is absent, or -errno if requesting it failed.
 */
static int spi_nor_hw_reset(struct spi_nor *nor)
{
	struct gpio_desc *reset;

	reset = devm_gpiod_get_optional(nor->dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR_OR_NULL(reset))
		return PTR_ERR_OR_ZERO(reset);

	/*
	 * Experimental delay values by looking at different flash device
	 * vendors datasheets.
	 */
	usleep_range(1, 5);
	gpiod_set_value_cansleep(reset, 1);
	usleep_range(100, 150);
	gpiod_set_value_cansleep(reset, 0);
	usleep_range(1000, 1200);

	return 0;
}

/**
 * spi_nor_scan() - detect the flash and initialize the SPI NOR framework
 * @nor:	pointer to a 'struct spi_nor'; nor->dev and the spimem/
 *		controller hooks must already be set by the caller
 * @name:	flash part name from platform data, or NULL to rely purely on
 *		JEDEC auto-detection
 * @hwcaps:	the hardware capabilities supported by the host controller
 *
 * Resets the protocols to 1-1-1, allocates the DMA-able bounce buffer,
 * performs an optional hardware reset, identifies the flash, initializes its
 * parameters (flash_info + SFDP), configures opcodes/protocols and fills in
 * the mtd_info.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_scan(struct spi_nor *nor, const char *name,
		 const struct spi_nor_hwcaps *hwcaps)
{
	const struct flash_info *info;
	struct device *dev = nor->dev;
	int ret;

	ret = spi_nor_check(nor);
	if (ret)
		return ret;

	/* Reset SPI protocol for all commands. */
	nor->reg_proto = SNOR_PROTO_1_1_1;
	nor->read_proto = SNOR_PROTO_1_1_1;
	nor->write_proto = SNOR_PROTO_1_1_1;

	/*
	 * We need the bounce buffer early to read/write registers when going
	 * through the spi-mem layer (buffers have to be DMA-able).
	 * For spi-mem drivers, we'll reallocate a new buffer if
	 * nor->params->page_size turns out to be greater than PAGE_SIZE (which
	 * shouldn't happen before long since NOR pages are usually less
	 * than 1KB) after spi_nor_scan() returns.
	 */
	nor->bouncebuf_size = PAGE_SIZE;
	nor->bouncebuf = devm_kmalloc(dev, nor->bouncebuf_size,
				      GFP_KERNEL);
	if (!nor->bouncebuf)
		return -ENOMEM;

	ret = spi_nor_hw_reset(nor);
	if (ret)
		return ret;

	info = spi_nor_get_flash_info(nor, name);
	if (IS_ERR(info))
		return PTR_ERR(info);

	nor->info = info;

	mutex_init(&nor->lock);

	/* Init flash parameters based on flash_info struct and SFDP */
	ret = spi_nor_init_params(nor);
	if (ret)
		return ret;

	if (spi_nor_use_parallel_locking(nor))
		init_waitqueue_head(&nor->rww.wait);

	/*
	 * Configure the SPI memory:
	 * - select op codes for (Fast) Read, Page Program and Sector Erase.
	 * - set the number of dummy cycles (mode cycles + wait states).
	 * - set the SPI protocols for register and memory accesses.
	 * - set the number of address bytes.
	 */
	ret = spi_nor_setup(nor, hwcaps);
	if (ret)
		return ret;

	/* Send all the required SPI flash commands to initialize device */
	ret = spi_nor_init(nor);
	if (ret)
		return ret;

	/* No mtd_info fields should be used up to this point. */
	spi_nor_set_mtd_info(nor);

	dev_dbg(dev, "Manufacturer and device ID: %*phN\n",
		SPI_NOR_MAX_ID_LEN, nor->id);

	return 0;
}
EXPORT_SYMBOL_GPL(spi_nor_scan);

/* Create a spi-mem direct-mapping descriptor covering reads of the flash. */
static int spi_nor_create_read_dirmap(struct spi_nor *nor)
{
	struct spi_mem_dirmap_info info = {
		.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 0),
				      SPI_MEM_OP_ADDR(nor->addr_nbytes, 0, 0),
				      SPI_MEM_OP_DUMMY(nor->read_dummy, 0),
				      SPI_MEM_OP_DATA_IN(0, NULL, 0)),
		.offset = 0,
		.length = nor->params->size,
	};
	struct spi_mem_op *op = &info.op_tmpl;

	spi_nor_spimem_setup_op(nor, op, nor->read_proto);

	/* convert the dummy cycles to the number of bytes */
	op->dummy.nbytes = (nor->read_dummy * op->dummy.buswidth) / 8;
	/* 2 bytes per clock cycle in DTR mode. */
	if (spi_nor_protocol_is_dtr(nor->read_proto))
		op->dummy.nbytes *= 2;

	/*
	 * Since spi_nor_spimem_setup_op() only sets buswidth when the number
	 * of data bytes is non-zero, the data buswidth won't be set here. So,
	 * do it explicitly.
	 */
	op->data.buswidth = spi_nor_get_protocol_data_nbits(nor->read_proto);

	nor->dirmap.rdesc = devm_spi_mem_dirmap_create(nor->dev, nor->spimem,
						       &info);
	return PTR_ERR_OR_ZERO(nor->dirmap.rdesc);
}

/* Create a spi-mem direct-mapping descriptor covering writes to the flash. */
static int spi_nor_create_write_dirmap(struct spi_nor *nor)
{
	struct spi_mem_dirmap_info info = {
		.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 0),
				      SPI_MEM_OP_ADDR(nor->addr_nbytes, 0, 0),
				      SPI_MEM_OP_NO_DUMMY,
				      SPI_MEM_OP_DATA_OUT(0, NULL, 0)),
		.offset = 0,
		.length = nor->params->size,
	};
	struct spi_mem_op *op = &info.op_tmpl;

	/* SST AAI word program sends no address after the first cycle. */
	if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
		op->addr.nbytes = 0;

	spi_nor_spimem_setup_op(nor, op, nor->write_proto);

	/*
	 * Since spi_nor_spimem_setup_op() only sets buswidth when the number
	 * of data bytes is non-zero, the data buswidth won't be set here. So,
	 * do it explicitly.
	 */
	op->data.buswidth = spi_nor_get_protocol_data_nbits(nor->write_proto);

	nor->dirmap.wdesc = devm_spi_mem_dirmap_create(nor->dev, nor->spimem,
						       &info);
	return PTR_ERR_OR_ZERO(nor->dirmap.wdesc);
}

/* spi-mem probe handler: allocate the spi_nor, scan the flash, register mtd. */
static int spi_nor_probe(struct spi_mem *spimem)
{
	struct spi_device *spi = spimem->spi;
	struct flash_platform_data *data = dev_get_platdata(&spi->dev);
	struct spi_nor *nor;
	/*
	 * Enable all caps by default. The core will mask them after
	 * checking what's really supported using spi_mem_supports_op().
	 */
	const struct spi_nor_hwcaps hwcaps = { .mask = SNOR_HWCAPS_ALL };
	char *flash_name;
	int ret;

	nor = devm_kzalloc(&spi->dev, sizeof(*nor), GFP_KERNEL);
	if (!nor)
		return -ENOMEM;

	nor->spimem = spimem;
	nor->dev = &spi->dev;
	spi_nor_set_flash_node(nor, spi->dev.of_node);

	spi_mem_set_drvdata(spimem, nor);

	if (data && data->name)
		nor->mtd.name = data->name;

	if (!nor->mtd.name)
		nor->mtd.name = spi_mem_get_name(spimem);

	/*
	 * For some (historical?) reason many platforms provide two different
	 * names in flash_platform_data: "name" and "type". Quite often name is
	 * set to "m25p80" and then "type" provides a real chip name.
	 * If that's the case, respect "type" and ignore a "name".
	 */
	if (data && data->type)
		flash_name = data->type;
	else if (!strcmp(spi->modalias, "spi-nor"))
		flash_name = NULL; /* auto-detect */
	else
		flash_name = spi->modalias;

	ret = spi_nor_scan(nor, flash_name, &hwcaps);
	if (ret)
		return ret;

	spi_nor_debugfs_register(nor);

	/*
	 * None of the existing parts have > 512B pages, but let's play safe
	 * and add this logic so that if anyone ever adds support for such
	 * a NOR we don't end up with buffer overflows.
	 */
	if (nor->params->page_size > PAGE_SIZE) {
		nor->bouncebuf_size = nor->params->page_size;
		devm_kfree(nor->dev, nor->bouncebuf);
		nor->bouncebuf = devm_kmalloc(nor->dev,
					      nor->bouncebuf_size,
					      GFP_KERNEL);
		if (!nor->bouncebuf)
			return -ENOMEM;
	}

	ret = spi_nor_create_read_dirmap(nor);
	if (ret)
		return ret;

	ret = spi_nor_create_write_dirmap(nor);
	if (ret)
		return ret;

	return mtd_device_register(&nor->mtd, data ? data->parts : NULL,
				   data ? data->nr_parts : 0);
}

/* spi-mem remove handler. */
static int spi_nor_remove(struct spi_mem *spimem)
{
	struct spi_nor *nor = spi_mem_get_drvdata(spimem);

	/* Put the flash back in a state the next boot stage can handle. */
	spi_nor_restore(nor);

	/* Clean up MTD stuff. */
	return mtd_device_unregister(&nor->mtd);
}

/* spi-mem shutdown handler. */
static void spi_nor_shutdown(struct spi_mem *spimem)
{
	struct spi_nor *nor = spi_mem_get_drvdata(spimem);

	spi_nor_restore(nor);
}

/*
 * Do NOT add to this array without reading the following:
 *
 * Historically, many flash devices are bound to this driver by their name. But
 * since most of these flash are compatible to some extent, and their
 * differences can often be differentiated by the JEDEC read-ID command, we
 * encourage new users to add support to the spi-nor library, and simply bind
 * against a generic string here (e.g., "jedec,spi-nor").
 *
 * Many flash names are kept here in this list to keep them available
 * as module aliases for existing platforms.
 */
static const struct spi_device_id spi_nor_dev_ids[] = {
	/*
	 * Allow non-DT platform devices to bind to the "spi-nor" modalias, and
	 * hack around the fact that the SPI core does not provide uevent
	 * matching for .of_match_table
	 */
	{"spi-nor"},

	/*
	 * Entries not used in DTs that should be safe to drop after replacing
	 * them with "spi-nor" in platform data.
	 */
	{"s25sl064a"},	{"w25x16"},	{"m25p10"},	{"m25px64"},

	/*
	 * Entries that were used in DTs without "jedec,spi-nor" fallback and
	 * should be kept for backward compatibility.
	 */
	{"at25df321a"},	{"at25df641"},	{"at26df081a"},
	{"mx25l4005a"},	{"mx25l1606e"},	{"mx25l6405d"},	{"mx25l12805d"},
	{"mx25l25635e"},{"mx66l51235l"},
	{"n25q064"},	{"n25q128a11"},	{"n25q128a13"},	{"n25q512a"},
	{"s25fl256s1"},	{"s25fl512s"},	{"s25sl12801"},	{"s25fl008k"},
	{"s25fl064k"},
	{"sst25vf040b"},{"sst25vf016b"},{"sst25vf032b"},{"sst25wf040"},
	{"m25p40"},	{"m25p80"},	{"m25p16"},	{"m25p32"},
	{"m25p64"},	{"m25p128"},
	{"w25x80"},	{"w25x32"},	{"w25q32"},	{"w25q32dw"},
	{"w25q80bl"},	{"w25q128"},	{"w25q256"},

	/* Flashes that can't be detected using JEDEC */
	{"m25p05-nonjedec"},	{"m25p10-nonjedec"},	{"m25p20-nonjedec"},
	{"m25p40-nonjedec"},	{"m25p80-nonjedec"},	{"m25p16-nonjedec"},
	{"m25p32-nonjedec"},	{"m25p64-nonjedec"},	{"m25p128-nonjedec"},

	/* Everspin MRAMs (non-JEDEC) */
	{ "mr25h128" }, /* 128 Kib, 40 MHz */
	{ "mr25h256" }, /* 256 Kib, 40 MHz */
	{ "mr25h10" },  /*   1 Mib, 40 MHz */
	{ "mr25h40" },  /*   4 Mib, 40 MHz */

	{ },	/* sentinel */
};
MODULE_DEVICE_TABLE(spi, spi_nor_dev_ids);

static const struct of_device_id spi_nor_of_table[] = {
	/*
	 * Generic compatibility for SPI NOR that can be identified by the
	 * JEDEC READ ID opcode (0x9F). Use this, if possible.
	 */
	{ .compatible = "jedec,spi-nor" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, spi_nor_of_table);

/*
 * REVISIT: many of these chips have deep power-down modes, which
 * should clearly be entered on suspend() to minimize power use.
 * And also when they're otherwise idle...
 */
static struct spi_mem_driver spi_nor_driver = {
	.spidrv = {
		.driver = {
			.name = "spi-nor",
			.of_match_table = spi_nor_of_table,
			.dev_groups = spi_nor_sysfs_groups,
		},
		.id_table = spi_nor_dev_ids,
	},
	.probe = spi_nor_probe,
	.remove = spi_nor_remove,
	.shutdown = spi_nor_shutdown,
};

static int __init spi_nor_module_init(void)
{
	return spi_mem_driver_register(&spi_nor_driver);
}
module_init(spi_nor_module_init);

static void __exit spi_nor_module_exit(void)
{
	spi_mem_driver_unregister(&spi_nor_driver);
	/* Tear down the debugfs root after all flash nodes are gone. */
	spi_nor_debugfs_shutdown();
}
module_exit(spi_nor_module_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Huang Shijie <shijie8@gmail.com>");
MODULE_AUTHOR("Mike Lavender");
MODULE_DESCRIPTION("framework for SPI NOR");