1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Based on m25p80.c, by Mike Lavender (mike@steroidmicros.com), with 4 * influence from lart.c (Abraham Van Der Merwe) and mtd_dataflash.c 5 * 6 * Copyright (C) 2005, Intec Automation Inc. 7 * Copyright (C) 2014, Freescale Semiconductor, Inc. 8 */ 9 10 #include <linux/err.h> 11 #include <linux/errno.h> 12 #include <linux/module.h> 13 #include <linux/device.h> 14 #include <linux/mutex.h> 15 #include <linux/math64.h> 16 #include <linux/sizes.h> 17 #include <linux/slab.h> 18 19 #include <linux/mtd/mtd.h> 20 #include <linux/of_platform.h> 21 #include <linux/sched/task_stack.h> 22 #include <linux/spi/flash.h> 23 #include <linux/mtd/spi-nor.h> 24 25 #include "core.h" 26 27 /* Define max times to check status register before we give up. */ 28 29 /* 30 * For everything but full-chip erase; probably could be much smaller, but kept 31 * around for safety for now 32 */ 33 #define DEFAULT_READY_WAIT_JIFFIES (40UL * HZ) 34 35 /* 36 * For full-chip erase, calibrated to a 2MB flash (M25P16); should be scaled up 37 * for larger flash 38 */ 39 #define CHIP_ERASE_2MB_READY_WAIT_JIFFIES (40UL * HZ) 40 41 #define SPI_NOR_MAX_ADDR_WIDTH 4 42 43 #define SPI_NOR_SRST_SLEEP_MIN 200 44 #define SPI_NOR_SRST_SLEEP_MAX 400 45 46 /** 47 * spi_nor_get_cmd_ext() - Get the command opcode extension based on the 48 * extension type. 49 * @nor: pointer to a 'struct spi_nor' 50 * @op: pointer to the 'struct spi_mem_op' whose properties 51 * need to be initialized. 52 * 53 * Right now, only "repeat" and "invert" are supported. 54 * 55 * Return: The opcode extension. 
56 */ 57 static u8 spi_nor_get_cmd_ext(const struct spi_nor *nor, 58 const struct spi_mem_op *op) 59 { 60 switch (nor->cmd_ext_type) { 61 case SPI_NOR_EXT_INVERT: 62 return ~op->cmd.opcode; 63 64 case SPI_NOR_EXT_REPEAT: 65 return op->cmd.opcode; 66 67 default: 68 dev_err(nor->dev, "Unknown command extension type\n"); 69 return 0; 70 } 71 } 72 73 /** 74 * spi_nor_spimem_setup_op() - Set up common properties of a spi-mem op. 75 * @nor: pointer to a 'struct spi_nor' 76 * @op: pointer to the 'struct spi_mem_op' whose properties 77 * need to be initialized. 78 * @proto: the protocol from which the properties need to be set. 79 */ 80 void spi_nor_spimem_setup_op(const struct spi_nor *nor, 81 struct spi_mem_op *op, 82 const enum spi_nor_protocol proto) 83 { 84 u8 ext; 85 86 op->cmd.buswidth = spi_nor_get_protocol_inst_nbits(proto); 87 88 if (op->addr.nbytes) 89 op->addr.buswidth = spi_nor_get_protocol_addr_nbits(proto); 90 91 if (op->dummy.nbytes) 92 op->dummy.buswidth = spi_nor_get_protocol_addr_nbits(proto); 93 94 if (op->data.nbytes) 95 op->data.buswidth = spi_nor_get_protocol_data_nbits(proto); 96 97 if (spi_nor_protocol_is_dtr(proto)) { 98 /* 99 * SPIMEM supports mixed DTR modes, but right now we can only 100 * have all phases either DTR or STR. IOW, SPIMEM can have 101 * something like 4S-4D-4D, but SPI NOR can't. So, set all 4 102 * phases to either DTR or STR. 103 */ 104 op->cmd.dtr = true; 105 op->addr.dtr = true; 106 op->dummy.dtr = true; 107 op->data.dtr = true; 108 109 /* 2 bytes per clock cycle in DTR mode. */ 110 op->dummy.nbytes *= 2; 111 112 ext = spi_nor_get_cmd_ext(nor, op); 113 op->cmd.opcode = (op->cmd.opcode << 8) | ext; 114 op->cmd.nbytes = 2; 115 } 116 } 117 118 /** 119 * spi_nor_spimem_bounce() - check if a bounce buffer is needed for the data 120 * transfer 121 * @nor: pointer to 'struct spi_nor' 122 * @op: pointer to 'struct spi_mem_op' template for transfer 123 * 124 * If we have to use the bounce buffer, the data field in @op will be updated. 
125 * 126 * Return: true if the bounce buffer is needed, false if not 127 */ 128 static bool spi_nor_spimem_bounce(struct spi_nor *nor, struct spi_mem_op *op) 129 { 130 /* op->data.buf.in occupies the same memory as op->data.buf.out */ 131 if (object_is_on_stack(op->data.buf.in) || 132 !virt_addr_valid(op->data.buf.in)) { 133 if (op->data.nbytes > nor->bouncebuf_size) 134 op->data.nbytes = nor->bouncebuf_size; 135 op->data.buf.in = nor->bouncebuf; 136 return true; 137 } 138 139 return false; 140 } 141 142 /** 143 * spi_nor_spimem_exec_op() - execute a memory operation 144 * @nor: pointer to 'struct spi_nor' 145 * @op: pointer to 'struct spi_mem_op' template for transfer 146 * 147 * Return: 0 on success, -error otherwise. 148 */ 149 static int spi_nor_spimem_exec_op(struct spi_nor *nor, struct spi_mem_op *op) 150 { 151 int error; 152 153 error = spi_mem_adjust_op_size(nor->spimem, op); 154 if (error) 155 return error; 156 157 return spi_mem_exec_op(nor->spimem, op); 158 } 159 160 static int spi_nor_controller_ops_read_reg(struct spi_nor *nor, u8 opcode, 161 u8 *buf, size_t len) 162 { 163 if (spi_nor_protocol_is_dtr(nor->reg_proto)) 164 return -EOPNOTSUPP; 165 166 return nor->controller_ops->read_reg(nor, opcode, buf, len); 167 } 168 169 static int spi_nor_controller_ops_write_reg(struct spi_nor *nor, u8 opcode, 170 const u8 *buf, size_t len) 171 { 172 if (spi_nor_protocol_is_dtr(nor->reg_proto)) 173 return -EOPNOTSUPP; 174 175 return nor->controller_ops->write_reg(nor, opcode, buf, len); 176 } 177 178 static int spi_nor_controller_ops_erase(struct spi_nor *nor, loff_t offs) 179 { 180 if (spi_nor_protocol_is_dtr(nor->write_proto)) 181 return -EOPNOTSUPP; 182 183 return nor->controller_ops->erase(nor, offs); 184 } 185 186 /** 187 * spi_nor_spimem_read_data() - read data from flash's memory region via 188 * spi-mem 189 * @nor: pointer to 'struct spi_nor' 190 * @from: offset to read from 191 * @len: number of bytes to read 192 * @buf: pointer to dst buffer 193 * 194 * 
 * Return: number of bytes read successfully, -errno otherwise
 */
static ssize_t spi_nor_spimem_read_data(struct spi_nor *nor, loff_t from,
					size_t len, u8 *buf)
{
	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 0),
			   SPI_MEM_OP_ADDR(nor->addr_width, from, 0),
			   SPI_MEM_OP_DUMMY(nor->read_dummy, 0),
			   SPI_MEM_OP_DATA_IN(len, buf, 0));
	bool usebouncebuf;
	ssize_t nbytes;
	int error;

	spi_nor_spimem_setup_op(nor, &op, nor->read_proto);

	/* convert the dummy cycles to the number of bytes */
	op.dummy.nbytes = (nor->read_dummy * op.dummy.buswidth) / 8;
	/* Two bytes are clocked out per dummy cycle in DTR mode. */
	if (spi_nor_protocol_is_dtr(nor->read_proto))
		op.dummy.nbytes *= 2;

	/*
	 * Must run before either transfer path below: it may redirect
	 * op.data.buf.in to the bounce buffer and clamp op.data.nbytes.
	 */
	usebouncebuf = spi_nor_spimem_bounce(nor, &op);

	if (nor->dirmap.rdesc) {
		/* Fast path: use the direct mapping descriptor. */
		nbytes = spi_mem_dirmap_read(nor->dirmap.rdesc, op.addr.val,
					     op.data.nbytes, op.data.buf.in);
	} else {
		error = spi_nor_spimem_exec_op(nor, &op);
		if (error)
			return error;
		nbytes = op.data.nbytes;
	}

	/* Copy the read-back data from the bounce buffer to the caller. */
	if (usebouncebuf && nbytes > 0)
		memcpy(buf, op.data.buf.in, nbytes);

	return nbytes;
}

/**
 * spi_nor_read_data() - read data from flash memory
 * @nor:        pointer to 'struct spi_nor'
 * @from:       offset to read from
 * @len:        number of bytes to read
 * @buf:        pointer to dst buffer
 *
 * Reads via spi-mem when available, otherwise through the legacy
 * controller_ops->read() callback.
 *
 * Return: number of bytes read successfully, -errno otherwise
 */
ssize_t spi_nor_read_data(struct spi_nor *nor, loff_t from, size_t len, u8 *buf)
{
	if (nor->spimem)
		return spi_nor_spimem_read_data(nor, from, len, buf);

	return nor->controller_ops->read(nor, from, len, buf);
}

/**
 * spi_nor_spimem_write_data() - write data to flash memory via
 *                               spi-mem
 * @nor:        pointer to 'struct spi_nor'
 * @to:         offset to write to
 * @len:        number of bytes to write
 * @buf:        pointer to src buffer
 *
 * Return: number of bytes written successfully, -errno otherwise
 */
static ssize_t spi_nor_spimem_write_data(struct spi_nor *nor, loff_t to,
					 size_t len, const u8 *buf)
{
	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 0),
			   SPI_MEM_OP_ADDR(nor->addr_width, to, 0),
			   SPI_MEM_OP_NO_DUMMY,
			   SPI_MEM_OP_DATA_OUT(len, buf, 0));
	ssize_t nbytes;
	int error;

	/* SST AAI programming only sends the address with the first write. */
	if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
		op.addr.nbytes = 0;

	spi_nor_spimem_setup_op(nor, &op, nor->write_proto);

	/*
	 * If bouncing is needed, stage the (possibly clamped) payload in the
	 * DMA-able bounce buffer before issuing the transfer.
	 */
	if (spi_nor_spimem_bounce(nor, &op))
		memcpy(nor->bouncebuf, buf, op.data.nbytes);

	if (nor->dirmap.wdesc) {
		/* Fast path: use the direct mapping descriptor. */
		nbytes = spi_mem_dirmap_write(nor->dirmap.wdesc, op.addr.val,
					      op.data.nbytes, op.data.buf.out);
	} else {
		error = spi_nor_spimem_exec_op(nor, &op);
		if (error)
			return error;
		nbytes = op.data.nbytes;
	}

	return nbytes;
}

/**
 * spi_nor_write_data() - write data to flash memory
 * @nor:        pointer to 'struct spi_nor'
 * @to:         offset to write to
 * @len:        number of bytes to write
 * @buf:        pointer to src buffer
 *
 * Writes via spi-mem when available, otherwise through the legacy
 * controller_ops->write() callback.
 *
 * Return: number of bytes written successfully, -errno otherwise
 */
ssize_t spi_nor_write_data(struct spi_nor *nor, loff_t to, size_t len,
			   const u8 *buf)
{
	if (nor->spimem)
		return spi_nor_spimem_write_data(nor, to, len, buf);

	return nor->controller_ops->write(nor, to, len, buf);
}

/**
 * spi_nor_write_enable() - Set write enable latch with Write Enable command.
 * @nor:	pointer to 'struct spi_nor'.
 *
 * Return: 0 on success, -errno otherwise.
315 */ 316 int spi_nor_write_enable(struct spi_nor *nor) 317 { 318 int ret; 319 320 if (nor->spimem) { 321 struct spi_mem_op op = 322 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREN, 0), 323 SPI_MEM_OP_NO_ADDR, 324 SPI_MEM_OP_NO_DUMMY, 325 SPI_MEM_OP_NO_DATA); 326 327 spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); 328 329 ret = spi_mem_exec_op(nor->spimem, &op); 330 } else { 331 ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WREN, 332 NULL, 0); 333 } 334 335 if (ret) 336 dev_dbg(nor->dev, "error %d on Write Enable\n", ret); 337 338 return ret; 339 } 340 341 /** 342 * spi_nor_write_disable() - Send Write Disable instruction to the chip. 343 * @nor: pointer to 'struct spi_nor'. 344 * 345 * Return: 0 on success, -errno otherwise. 346 */ 347 int spi_nor_write_disable(struct spi_nor *nor) 348 { 349 int ret; 350 351 if (nor->spimem) { 352 struct spi_mem_op op = 353 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRDI, 0), 354 SPI_MEM_OP_NO_ADDR, 355 SPI_MEM_OP_NO_DUMMY, 356 SPI_MEM_OP_NO_DATA); 357 358 spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); 359 360 ret = spi_mem_exec_op(nor->spimem, &op); 361 } else { 362 ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WRDI, 363 NULL, 0); 364 } 365 366 if (ret) 367 dev_dbg(nor->dev, "error %d on Write Disable\n", ret); 368 369 return ret; 370 } 371 372 /** 373 * spi_nor_read_sr() - Read the Status Register. 374 * @nor: pointer to 'struct spi_nor'. 375 * @sr: pointer to a DMA-able buffer where the value of the 376 * Status Register will be written. Should be at least 2 bytes. 377 * 378 * Return: 0 on success, -errno otherwise. 
 */
int spi_nor_read_sr(struct spi_nor *nor, u8 *sr)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_DATA_IN(1, sr, 0));

		/* Octal DTR mode needs extra address/dummy cycles for RDSR. */
		if (nor->reg_proto == SNOR_PROTO_8_8_8_DTR) {
			op.addr.nbytes = nor->params->rdsr_addr_nbytes;
			op.dummy.nbytes = nor->params->rdsr_dummy;
			/*
			 * We don't want to read only one byte in DTR mode. So,
			 * read 2 and then discard the second byte.
			 */
			op.data.nbytes = 2;
		}

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDSR, sr,
						      1);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d reading SR\n", ret);

	return ret;
}

/**
 * spi_nor_read_fsr() - Read the Flag Status Register.
 * @nor:	pointer to 'struct spi_nor'
 * @fsr:	pointer to a DMA-able buffer where the value of the
 *		Flag Status Register will be written. Should be at least 2
 *		bytes.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_read_fsr(struct spi_nor *nor, u8 *fsr)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDFSR, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_DATA_IN(1, fsr, 0));

		/* Octal DTR mode needs extra address/dummy cycles for RDFSR. */
		if (nor->reg_proto == SNOR_PROTO_8_8_8_DTR) {
			op.addr.nbytes = nor->params->rdsr_addr_nbytes;
			op.dummy.nbytes = nor->params->rdsr_dummy;
			/*
			 * We don't want to read only one byte in DTR mode. So,
			 * read 2 and then discard the second byte.
			 */
			op.data.nbytes = 2;
		}

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDFSR, fsr,
						      1);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d reading FSR\n", ret);

	return ret;
}

/**
 * spi_nor_read_cr() - Read the Configuration Register using the
 * SPINOR_OP_RDCR (35h) command.
 * @nor:	pointer to 'struct spi_nor'
 * @cr:		pointer to a DMA-able buffer where the value of the
 *		Configuration Register will be written.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_read_cr(struct spi_nor *nor, u8 *cr)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDCR, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_DATA_IN(1, cr, 0));

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDCR, cr,
						      1);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d reading CR\n", ret);

	return ret;
}

/**
 * spi_nor_set_4byte_addr_mode() - Enter/Exit 4-byte address mode.
 * @nor:	pointer to 'struct spi_nor'.
 * @enable:	true to enter the 4-byte address mode, false to exit the 4-byte
 *		address mode.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(enable ?
						  SPINOR_OP_EN4B :
						  SPINOR_OP_EX4B,
						  0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_NO_DATA);

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor,
						       enable ? SPINOR_OP_EN4B :
								SPINOR_OP_EX4B,
						       NULL, 0);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret);

	return ret;
}

/**
 * spansion_set_4byte_addr_mode() - Set 4-byte address mode for Spansion
 * flashes.
 * @nor:	pointer to 'struct spi_nor'.
 * @enable:	true to enter the 4-byte address mode, false to exit the 4-byte
 *		address mode.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spansion_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
{
	int ret;

	/* Bank Register: bit 7 selects the extended (4-byte) address mode. */
	nor->bouncebuf[0] = enable << 7;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_BRWR, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 0));

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_BRWR,
						       nor->bouncebuf, 1);
	}

	if (ret)
		dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret);

	return ret;
}

/**
 * spi_nor_write_ear() - Write Extended Address Register.
 * @nor:	pointer to 'struct spi_nor'.
 * @ear:	value to write to the Extended Address Register.
 *
 * Return: 0 on success, -errno otherwise.
573 */ 574 int spi_nor_write_ear(struct spi_nor *nor, u8 ear) 575 { 576 int ret; 577 578 nor->bouncebuf[0] = ear; 579 580 if (nor->spimem) { 581 struct spi_mem_op op = 582 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREAR, 0), 583 SPI_MEM_OP_NO_ADDR, 584 SPI_MEM_OP_NO_DUMMY, 585 SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 0)); 586 587 spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); 588 589 ret = spi_mem_exec_op(nor->spimem, &op); 590 } else { 591 ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WREAR, 592 nor->bouncebuf, 1); 593 } 594 595 if (ret) 596 dev_dbg(nor->dev, "error %d writing EAR\n", ret); 597 598 return ret; 599 } 600 601 /** 602 * spi_nor_xread_sr() - Read the Status Register on S3AN flashes. 603 * @nor: pointer to 'struct spi_nor'. 604 * @sr: pointer to a DMA-able buffer where the value of the 605 * Status Register will be written. 606 * 607 * Return: 0 on success, -errno otherwise. 608 */ 609 int spi_nor_xread_sr(struct spi_nor *nor, u8 *sr) 610 { 611 int ret; 612 613 if (nor->spimem) { 614 struct spi_mem_op op = 615 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_XRDSR, 0), 616 SPI_MEM_OP_NO_ADDR, 617 SPI_MEM_OP_NO_DUMMY, 618 SPI_MEM_OP_DATA_IN(1, sr, 0)); 619 620 spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); 621 622 ret = spi_mem_exec_op(nor->spimem, &op); 623 } else { 624 ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_XRDSR, sr, 625 1); 626 } 627 628 if (ret) 629 dev_dbg(nor->dev, "error %d reading XRDSR\n", ret); 630 631 return ret; 632 } 633 634 /** 635 * spi_nor_xsr_ready() - Query the Status Register of the S3AN flash to see if 636 * the flash is ready for new commands. 637 * @nor: pointer to 'struct spi_nor'. 638 * 639 * Return: 1 if ready, 0 if not ready, -errno on errors. 
 */
static int spi_nor_xsr_ready(struct spi_nor *nor)
{
	int ret;

	ret = spi_nor_xread_sr(nor, nor->bouncebuf);
	if (ret)
		return ret;

	/* Normalize the RDY bit to 0/1. */
	return !!(nor->bouncebuf[0] & XSR_RDY);
}

/**
 * spi_nor_clear_sr() - Clear the Status Register.
 * @nor:	pointer to 'struct spi_nor'.
 */
static void spi_nor_clear_sr(struct spi_nor *nor)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLSR, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_NO_DATA);

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_CLSR,
						       NULL, 0);
	}

	/* Best effort: failure is only logged, not propagated. */
	if (ret)
		dev_dbg(nor->dev, "error %d clearing SR\n", ret);
}

/**
 * spi_nor_sr_ready() - Query the Status Register to see if the flash is ready
 * for new commands.
 * @nor:	pointer to 'struct spi_nor'.
 *
 * Return: 1 if ready, 0 if not ready, -errno on errors.
 */
static int spi_nor_sr_ready(struct spi_nor *nor)
{
	int ret = spi_nor_read_sr(nor, nor->bouncebuf);

	if (ret)
		return ret;

	/* Only flashes using the Clear SR command report errors in the SR. */
	if (nor->flags & SNOR_F_USE_CLSR &&
	    nor->bouncebuf[0] & (SR_E_ERR | SR_P_ERR)) {
		if (nor->bouncebuf[0] & SR_E_ERR)
			dev_err(nor->dev, "Erase Error occurred\n");
		else
			dev_err(nor->dev, "Programming Error occurred\n");

		spi_nor_clear_sr(nor);

		/*
		 * WEL bit remains set to one when an erase or page program
		 * error occurs. Issue a Write Disable command to protect
		 * against inadvertent writes that can possibly corrupt the
		 * contents of the memory.
		 */
		ret = spi_nor_write_disable(nor);
		if (ret)
			return ret;

		return -EIO;
	}

	/* Ready when the Write In Progress bit is clear. */
	return !(nor->bouncebuf[0] & SR_WIP);
}

/**
 * spi_nor_clear_fsr() - Clear the Flag Status Register.
 * @nor:	pointer to 'struct spi_nor'.
 */
static void spi_nor_clear_fsr(struct spi_nor *nor)
{
	int ret;

	if (nor->spimem) {
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLFSR, 0),
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_NO_DATA);

		spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

		ret = spi_mem_exec_op(nor->spimem, &op);
	} else {
		ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_CLFSR,
						       NULL, 0);
	}

	/* Best effort: failure is only logged, not propagated. */
	if (ret)
		dev_dbg(nor->dev, "error %d clearing FSR\n", ret);
}

/**
 * spi_nor_fsr_ready() - Query the Flag Status Register to see if the flash is
 * ready for new commands.
 * @nor:	pointer to 'struct spi_nor'.
 *
 * Return: 1 if ready, 0 if not ready, -errno on errors.
 */
static int spi_nor_fsr_ready(struct spi_nor *nor)
{
	int ret = spi_nor_read_fsr(nor, nor->bouncebuf);

	if (ret)
		return ret;

	if (nor->bouncebuf[0] & (FSR_E_ERR | FSR_P_ERR)) {
		if (nor->bouncebuf[0] & FSR_E_ERR)
			dev_err(nor->dev, "Erase operation failed.\n");
		else
			dev_err(nor->dev, "Program operation failed.\n");

		if (nor->bouncebuf[0] & FSR_PT_ERR)
			dev_err(nor->dev,
				"Attempted to modify a protected sector.\n");

		spi_nor_clear_fsr(nor);

		/*
		 * WEL bit remains set to one when an erase or page program
		 * error occurs. Issue a Write Disable command to protect
		 * against inadvertent writes that can possibly corrupt the
		 * contents of the memory.
		 */
		ret = spi_nor_write_disable(nor);
		if (ret)
			return ret;

		return -EIO;
	}

	/* Normalize the READY bit to 0/1. */
	return !!(nor->bouncebuf[0] & FSR_READY);
}

/**
 * spi_nor_ready() - Query the flash to see if it is ready for new commands.
 * @nor:	pointer to 'struct spi_nor'.
 *
 * Return: 1 if ready, 0 if not ready, -errno on errors.
 */
static int spi_nor_ready(struct spi_nor *nor)
{
	int sr, fsr;

	/* S3AN flashes use their own XRDSR-based readiness check. */
	if (nor->flags & SNOR_F_READY_XSR_RDY)
		sr = spi_nor_xsr_ready(nor);
	else
		sr = spi_nor_sr_ready(nor);
	if (sr < 0)
		return sr;
	/* Flashes without an FSR are considered FSR-ready by definition. */
	fsr = nor->flags & SNOR_F_USE_FSR ? spi_nor_fsr_ready(nor) : 1;
	if (fsr < 0)
		return fsr;
	return sr && fsr;
}

/**
 * spi_nor_wait_till_ready_with_timeout() - Service routine to read the
 * Status Register until ready, or timeout occurs.
 * @nor:		pointer to "struct spi_nor".
 * @timeout_jiffies:	jiffies to wait until timeout.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_wait_till_ready_with_timeout(struct spi_nor *nor,
						unsigned long timeout_jiffies)
{
	unsigned long deadline;
	int timeout = 0, ret;

	deadline = jiffies + timeout_jiffies;

	while (!timeout) {
		/*
		 * Flag expiry but still poll once more below, so that a flash
		 * that became ready right at the deadline (e.g. while we were
		 * scheduled out) is not falsely reported as timed out.
		 */
		if (time_after_eq(jiffies, deadline))
			timeout = 1;

		ret = spi_nor_ready(nor);
		if (ret < 0)
			return ret;
		if (ret)
			return 0;

		cond_resched();
	}

	dev_dbg(nor->dev, "flash operation timed out\n");

	return -ETIMEDOUT;
}

/**
 * spi_nor_wait_till_ready() - Wait for a predefined amount of time for the
 * flash to be ready, or timeout occurs.
 * @nor:	pointer to "struct spi_nor".
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_wait_till_ready(struct spi_nor *nor)
{
	return spi_nor_wait_till_ready_with_timeout(nor,
						    DEFAULT_READY_WAIT_JIFFIES);
}

/**
 * spi_nor_global_block_unlock() - Unlock Global Block Protection.
 * @nor:	pointer to 'struct spi_nor'.
 *
 * Return: 0 on success, -errno otherwise.
861 */ 862 int spi_nor_global_block_unlock(struct spi_nor *nor) 863 { 864 int ret; 865 866 ret = spi_nor_write_enable(nor); 867 if (ret) 868 return ret; 869 870 if (nor->spimem) { 871 struct spi_mem_op op = 872 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_GBULK, 0), 873 SPI_MEM_OP_NO_ADDR, 874 SPI_MEM_OP_NO_DUMMY, 875 SPI_MEM_OP_NO_DATA); 876 877 spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); 878 879 ret = spi_mem_exec_op(nor->spimem, &op); 880 } else { 881 ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_GBULK, 882 NULL, 0); 883 } 884 885 if (ret) { 886 dev_dbg(nor->dev, "error %d on Global Block Unlock\n", ret); 887 return ret; 888 } 889 890 return spi_nor_wait_till_ready(nor); 891 } 892 893 /** 894 * spi_nor_write_sr() - Write the Status Register. 895 * @nor: pointer to 'struct spi_nor'. 896 * @sr: pointer to DMA-able buffer to write to the Status Register. 897 * @len: number of bytes to write to the Status Register. 898 * 899 * Return: 0 on success, -errno otherwise. 900 */ 901 int spi_nor_write_sr(struct spi_nor *nor, const u8 *sr, size_t len) 902 { 903 int ret; 904 905 ret = spi_nor_write_enable(nor); 906 if (ret) 907 return ret; 908 909 if (nor->spimem) { 910 struct spi_mem_op op = 911 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR, 0), 912 SPI_MEM_OP_NO_ADDR, 913 SPI_MEM_OP_NO_DUMMY, 914 SPI_MEM_OP_DATA_OUT(len, sr, 0)); 915 916 spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); 917 918 ret = spi_mem_exec_op(nor->spimem, &op); 919 } else { 920 ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WRSR, sr, 921 len); 922 } 923 924 if (ret) { 925 dev_dbg(nor->dev, "error %d writing SR\n", ret); 926 return ret; 927 } 928 929 return spi_nor_wait_till_ready(nor); 930 } 931 932 /** 933 * spi_nor_write_sr1_and_check() - Write one byte to the Status Register 1 and 934 * ensure that the byte written match the received value. 935 * @nor: pointer to a 'struct spi_nor'. 936 * @sr1: byte value to be written to the Status Register. 
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_write_sr1_and_check(struct spi_nor *nor, u8 sr1)
{
	int ret;

	nor->bouncebuf[0] = sr1;

	ret = spi_nor_write_sr(nor, nor->bouncebuf, 1);
	if (ret)
		return ret;

	/* Read back and verify the value actually landed in SR1. */
	ret = spi_nor_read_sr(nor, nor->bouncebuf);
	if (ret)
		return ret;

	if (nor->bouncebuf[0] != sr1) {
		dev_dbg(nor->dev, "SR1: read back test failed\n");
		return -EIO;
	}

	return 0;
}

/**
 * spi_nor_write_16bit_sr_and_check() - Write the Status Register 1 and the
 * Status Register 2 in one shot. Ensure that the byte written in the Status
 * Register 1 match the received value, and that the 16-bit Write did not
 * affect what was already in the Status Register 2.
 * @nor:	pointer to a 'struct spi_nor'.
 * @sr1:	byte value to be written to the Status Register 1.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_write_16bit_sr_and_check(struct spi_nor *nor, u8 sr1)
{
	int ret;
	u8 *sr_cr = nor->bouncebuf;
	u8 cr_written;

	/* Make sure we don't overwrite the contents of Status Register 2. */
	if (!(nor->flags & SNOR_F_NO_READ_CR)) {
		ret = spi_nor_read_cr(nor, &sr_cr[1]);
		if (ret)
			return ret;
	} else if (nor->params->quad_enable) {
		/*
		 * If the Status Register 2 Read command (35h) is not
		 * supported, we should at least be sure we don't
		 * change the value of the SR2 Quad Enable bit.
		 *
		 * We can safely assume that when the Quad Enable method is
		 * set, the value of the QE bit is one, as a consequence of the
		 * nor->params->quad_enable() call.
		 *
		 * We can safely assume that the Quad Enable bit is present in
		 * the Status Register 2 at BIT(1). According to the JESD216
		 * revB standard, BFPT DWORDS[15], bits 22:20, the 16-bit
		 * Write Status (01h) command is available just for the cases
		 * in which the QE bit is described in SR2 at BIT(1).
		 */
		sr_cr[1] = SR2_QUAD_EN_BIT1;
	} else {
		sr_cr[1] = 0;
	}

	sr_cr[0] = sr1;

	ret = spi_nor_write_sr(nor, sr_cr, 2);
	if (ret)
		return ret;

	/* Without RDCR there is nothing we can read back to verify. */
	if (nor->flags & SNOR_F_NO_READ_CR)
		return 0;

	cr_written = sr_cr[1];

	ret = spi_nor_read_cr(nor, &sr_cr[1]);
	if (ret)
		return ret;

	if (cr_written != sr_cr[1]) {
		dev_dbg(nor->dev, "CR: read back test failed\n");
		return -EIO;
	}

	return 0;
}

/**
 * spi_nor_write_16bit_cr_and_check() - Write the Status Register 1 and the
 * Configuration Register in one shot. Ensure that the byte written in the
 * Configuration Register match the received value, and that the 16-bit Write
 * did not affect what was already in the Status Register 1.
 * @nor:	pointer to a 'struct spi_nor'.
 * @cr:		byte value to be written to the Configuration Register.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_write_16bit_cr_and_check(struct spi_nor *nor, u8 cr)
{
	int ret;
	u8 *sr_cr = nor->bouncebuf;
	u8 sr_written;

	/* Keep the current value of the Status Register 1. */
	ret = spi_nor_read_sr(nor, sr_cr);
	if (ret)
		return ret;

	sr_cr[1] = cr;

	ret = spi_nor_write_sr(nor, sr_cr, 2);
	if (ret)
		return ret;

	sr_written = sr_cr[0];

	/* Verify SR1 was preserved by the 16-bit write. */
	ret = spi_nor_read_sr(nor, sr_cr);
	if (ret)
		return ret;

	if (sr_written != sr_cr[0]) {
		dev_dbg(nor->dev, "SR: Read back test failed\n");
		return -EIO;
	}

	/* Without RDCR we cannot verify the CR value; assume success. */
	if (nor->flags & SNOR_F_NO_READ_CR)
		return 0;

	ret = spi_nor_read_cr(nor, &sr_cr[1]);
	if (ret)
		return ret;

	if (cr != sr_cr[1]) {
		dev_dbg(nor->dev, "CR: read back test failed\n");
		return -EIO;
	}

	return 0;
}

/**
 * spi_nor_write_sr_and_check() - Write the Status Register 1 and ensure that
 * the byte written match the received value without affecting other bits in the
 * Status Register 1 and 2.
 * @nor:	pointer to a 'struct spi_nor'.
 * @sr1:	byte value to be written to the Status Register.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_write_sr_and_check(struct spi_nor *nor, u8 sr1)
{
	if (nor->flags & SNOR_F_HAS_16BIT_SR)
		return spi_nor_write_16bit_sr_and_check(nor, sr1);

	return spi_nor_write_sr1_and_check(nor, sr1);
}

/**
 * spi_nor_write_sr2() - Write the Status Register 2 using the
 * SPINOR_OP_WRSR2 (3eh) command.
 * @nor:	pointer to 'struct spi_nor'.
 * @sr2:	pointer to DMA-able buffer to write to the Status Register 2.
 *
 * Return: 0 on success, -errno otherwise.
1104 */ 1105 static int spi_nor_write_sr2(struct spi_nor *nor, const u8 *sr2) 1106 { 1107 int ret; 1108 1109 ret = spi_nor_write_enable(nor); 1110 if (ret) 1111 return ret; 1112 1113 if (nor->spimem) { 1114 struct spi_mem_op op = 1115 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR2, 0), 1116 SPI_MEM_OP_NO_ADDR, 1117 SPI_MEM_OP_NO_DUMMY, 1118 SPI_MEM_OP_DATA_OUT(1, sr2, 0)); 1119 1120 spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); 1121 1122 ret = spi_mem_exec_op(nor->spimem, &op); 1123 } else { 1124 ret = spi_nor_controller_ops_write_reg(nor, SPINOR_OP_WRSR2, 1125 sr2, 1); 1126 } 1127 1128 if (ret) { 1129 dev_dbg(nor->dev, "error %d writing SR2\n", ret); 1130 return ret; 1131 } 1132 1133 return spi_nor_wait_till_ready(nor); 1134 } 1135 1136 /** 1137 * spi_nor_read_sr2() - Read the Status Register 2 using the 1138 * SPINOR_OP_RDSR2 (3fh) command. 1139 * @nor: pointer to 'struct spi_nor'. 1140 * @sr2: pointer to DMA-able buffer where the value of the 1141 * Status Register 2 will be written. 1142 * 1143 * Return: 0 on success, -errno otherwise. 1144 */ 1145 static int spi_nor_read_sr2(struct spi_nor *nor, u8 *sr2) 1146 { 1147 int ret; 1148 1149 if (nor->spimem) { 1150 struct spi_mem_op op = 1151 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR2, 0), 1152 SPI_MEM_OP_NO_ADDR, 1153 SPI_MEM_OP_NO_DUMMY, 1154 SPI_MEM_OP_DATA_IN(1, sr2, 0)); 1155 1156 spi_nor_spimem_setup_op(nor, &op, nor->reg_proto); 1157 1158 ret = spi_mem_exec_op(nor->spimem, &op); 1159 } else { 1160 ret = spi_nor_controller_ops_read_reg(nor, SPINOR_OP_RDSR2, sr2, 1161 1); 1162 } 1163 1164 if (ret) 1165 dev_dbg(nor->dev, "error %d reading SR2\n", ret); 1166 1167 return ret; 1168 } 1169 1170 /** 1171 * spi_nor_erase_chip() - Erase the entire flash memory. 1172 * @nor: pointer to 'struct spi_nor'. 1173 * 1174 * Return: 0 on success, -errno otherwise. 
 */
static int spi_nor_erase_chip(struct spi_nor *nor)
{
        int ret;

        dev_dbg(nor->dev, " %lldKiB\n", (long long)(nor->mtd.size >> 10));

        if (nor->spimem) {
                struct spi_mem_op op =
                        SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CHIP_ERASE, 0),
                                   SPI_MEM_OP_NO_ADDR,
                                   SPI_MEM_OP_NO_DUMMY,
                                   SPI_MEM_OP_NO_DATA);

                spi_nor_spimem_setup_op(nor, &op, nor->write_proto);

                ret = spi_mem_exec_op(nor->spimem, &op);
        } else {
                ret = spi_nor_controller_ops_write_reg(nor,
                                                       SPINOR_OP_CHIP_ERASE,
                                                       NULL, 0);
        }

        if (ret)
                dev_dbg(nor->dev, "error %d erasing chip\n", ret);

        return ret;
}

/*
 * Map @opcode through a two-column conversion @table of @size rows.
 * Returns the converted opcode, or @opcode unchanged when no row matches.
 */
static u8 spi_nor_convert_opcode(u8 opcode, const u8 table[][2], size_t size)
{
        size_t i;

        for (i = 0; i < size; i++)
                if (table[i][0] == opcode)
                        return table[i][1];

        /* No conversion found, keep input op code. */
        return opcode;
}

/* Convert a 3-byte-address read opcode to its 4-byte-address variant. */
u8 spi_nor_convert_3to4_read(u8 opcode)
{
        static const u8 spi_nor_3to4_read[][2] = {
                { SPINOR_OP_READ,       SPINOR_OP_READ_4B },
                { SPINOR_OP_READ_FAST,  SPINOR_OP_READ_FAST_4B },
                { SPINOR_OP_READ_1_1_2, SPINOR_OP_READ_1_1_2_4B },
                { SPINOR_OP_READ_1_2_2, SPINOR_OP_READ_1_2_2_4B },
                { SPINOR_OP_READ_1_1_4, SPINOR_OP_READ_1_1_4_4B },
                { SPINOR_OP_READ_1_4_4, SPINOR_OP_READ_1_4_4_4B },
                { SPINOR_OP_READ_1_1_8, SPINOR_OP_READ_1_1_8_4B },
                { SPINOR_OP_READ_1_8_8, SPINOR_OP_READ_1_8_8_4B },

                { SPINOR_OP_READ_1_1_1_DTR,     SPINOR_OP_READ_1_1_1_DTR_4B },
                { SPINOR_OP_READ_1_2_2_DTR,     SPINOR_OP_READ_1_2_2_DTR_4B },
                { SPINOR_OP_READ_1_4_4_DTR,     SPINOR_OP_READ_1_4_4_DTR_4B },
        };

        return spi_nor_convert_opcode(opcode, spi_nor_3to4_read,
                                      ARRAY_SIZE(spi_nor_3to4_read));
}

/* Convert a 3-byte-address program opcode to its 4-byte-address variant. */
static u8 spi_nor_convert_3to4_program(u8 opcode)
{
        static const u8 spi_nor_3to4_program[][2] = {
                { SPINOR_OP_PP,         SPINOR_OP_PP_4B },
                { SPINOR_OP_PP_1_1_4,   SPINOR_OP_PP_1_1_4_4B },
                { SPINOR_OP_PP_1_4_4,   SPINOR_OP_PP_1_4_4_4B },
                { SPINOR_OP_PP_1_1_8,   SPINOR_OP_PP_1_1_8_4B },
                { SPINOR_OP_PP_1_8_8,   SPINOR_OP_PP_1_8_8_4B },
        };

        return spi_nor_convert_opcode(opcode, spi_nor_3to4_program,
                                      ARRAY_SIZE(spi_nor_3to4_program));
}

/* Convert a 3-byte-address erase opcode to its 4-byte-address variant. */
static u8 spi_nor_convert_3to4_erase(u8 opcode)
{
        static const u8 spi_nor_3to4_erase[][2] = {
                { SPINOR_OP_BE_4K,      SPINOR_OP_BE_4K_4B },
                { SPINOR_OP_BE_32K,     SPINOR_OP_BE_32K_4B },
                { SPINOR_OP_SE,         SPINOR_OP_SE_4B },
        };

        return spi_nor_convert_opcode(opcode, spi_nor_3to4_erase,
                                      ARRAY_SIZE(spi_nor_3to4_erase));
}

/* True when the erase map describes a single, uniform erase type. */
static bool spi_nor_has_uniform_erase(const struct spi_nor *nor)
{
        return !!nor->params->erase_map.uniform_erase_type;
}

/* Switch read/program/erase opcodes to their 4-byte address variants. */
static void spi_nor_set_4byte_opcodes(struct spi_nor *nor)
{
        nor->read_opcode = spi_nor_convert_3to4_read(nor->read_opcode);
        nor->program_opcode = spi_nor_convert_3to4_program(nor->program_opcode);
        nor->erase_opcode = spi_nor_convert_3to4_erase(nor->erase_opcode);

        if (!spi_nor_has_uniform_erase(nor)) {
                struct spi_nor_erase_map *map = &nor->params->erase_map;
                struct spi_nor_erase_type *erase;
                int i;

                /* A non-uniform map: also convert every erase type opcode. */
                for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
                        erase = &map->erase_type[i];
                        erase->opcode =
                                spi_nor_convert_3to4_erase(erase->opcode);
                }
        }
}

/*
 * Take the chip mutex and, when the controller provides a prepare hook, run
 * it. On prepare failure the mutex is released before returning the error.
 */
int spi_nor_lock_and_prep(struct spi_nor *nor)
{
        int ret = 0;

        mutex_lock(&nor->lock);

        if (nor->controller_ops && nor->controller_ops->prepare) {
                ret = nor->controller_ops->prepare(nor);
                if (ret) {
                        mutex_unlock(&nor->lock);
                        return ret;
                }
        }
        return ret;
}

/* Counterpart of spi_nor_lock_and_prep(): unprepare hook, then unlock. */
void spi_nor_unlock_and_unprep(struct spi_nor *nor)
{
        if (nor->controller_ops && nor->controller_ops->unprepare)
                nor->controller_ops->unprepare(nor);
        mutex_unlock(&nor->lock);
}

/* Apply the flash-specific address conversion hook, if one is set. */
static u32 spi_nor_convert_addr(struct spi_nor *nor, loff_t addr)
{
        if (!nor->params->convert_addr)
                return addr;

        return nor->params->convert_addr(nor, addr);
}

/*
 * Initiate the erasure of a single sector
 */
static int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
{
        int i;

        addr = spi_nor_convert_addr(nor, addr);

        if (nor->spimem) {
                struct spi_mem_op op =
                        SPI_MEM_OP(SPI_MEM_OP_CMD(nor->erase_opcode, 0),
                                   SPI_MEM_OP_ADDR(nor->addr_width, addr, 0),
                                   SPI_MEM_OP_NO_DUMMY,
                                   SPI_MEM_OP_NO_DATA);

                spi_nor_spimem_setup_op(nor, &op, nor->write_proto);

                return spi_mem_exec_op(nor->spimem, &op);
        } else if (nor->controller_ops->erase) {
                return spi_nor_controller_ops_erase(nor, addr);
        }

        /*
         * Default implementation, if driver doesn't have a specialized HW
         * control
         */
        /* Serialize the address bytes MSB-first into the bounce buffer. */
        for (i = nor->addr_width - 1; i >= 0; i--) {
                nor->bouncebuf[i] = addr & 0xff;
                addr >>= 8;
        }

        return spi_nor_controller_ops_write_reg(nor, nor->erase_opcode,
                                                nor->bouncebuf, nor->addr_width);
}

/**
 * spi_nor_div_by_erase_size() - calculate remainder and update new dividend
 * @erase:      pointer to a structure that describes a SPI NOR erase type
 * @dividend:   dividend value
 * @remainder:  pointer to u32 remainder (will be updated)
 *
 * Return: the result of the division
 */
static u64 spi_nor_div_by_erase_size(const struct spi_nor_erase_type *erase,
                                     u64 dividend, u32 *remainder)
{
        /* JEDEC JESD216B Standard imposes erase sizes to be power of 2.
         */
        *remainder = (u32)dividend & erase->size_mask;
        return dividend >> erase->size_shift;
}

/**
 * spi_nor_find_best_erase_type() - find the best erase type for the given
 *                                  offset in the serial flash memory and the
 *                                  number of bytes to erase. The region in
 *                                  which the address fits is expected to be
 *                                  provided.
 * @map:        the erase map of the SPI NOR
 * @region:     pointer to a structure that describes a SPI NOR erase region
 * @addr:       offset in the serial flash memory
 * @len:        number of bytes to erase
 *
 * Return: a pointer to the best fitted erase type, NULL otherwise.
 */
static const struct spi_nor_erase_type *
spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map,
                             const struct spi_nor_erase_region *region,
                             u64 addr, u32 len)
{
        const struct spi_nor_erase_type *erase;
        u32 rem;
        int i;
        u8 erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;

        /*
         * Erase types are ordered by size, with the smallest erase type at
         * index 0.
         */
        for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
                /* Does the erase region support the tested erase type? */
                if (!(erase_mask & BIT(i)))
                        continue;

                erase = &map->erase_type[i];

                /* Alignment is not mandatory for overlaid regions */
                if (region->offset & SNOR_OVERLAID_REGION &&
                    region->size <= len)
                        return erase;

                /* Don't erase more than what the user has asked for. */
                if (erase->size > len)
                        continue;

                /* The start address must be aligned to the erase size. */
                spi_nor_div_by_erase_size(erase, addr, &rem);
                if (rem)
                        continue;
                else
                        return erase;
        }

        return NULL;
}

/* Non-zero when @region carries the SNOR_LAST_REGION marker bit. */
static u64 spi_nor_region_is_last(const struct spi_nor_erase_region *region)
{
        return region->offset & SNOR_LAST_REGION;
}

/* End offset (exclusive) of @region, with the flag bits masked out. */
static u64 spi_nor_region_end(const struct spi_nor_erase_region *region)
{
        return (region->offset & ~SNOR_ERASE_FLAGS_MASK) + region->size;
}

/**
 * spi_nor_region_next() - get the next spi nor region
 * @region:     pointer to a structure that describes a SPI NOR erase region
 *
 * Return: the next spi nor region or NULL if last region.
 */
struct spi_nor_erase_region *
spi_nor_region_next(struct spi_nor_erase_region *region)
{
        if (spi_nor_region_is_last(region))
                return NULL;
        region++;
        return region;
}

/**
 * spi_nor_find_erase_region() - find the region of the serial flash memory in
 *                               which the offset fits
 * @map:        the erase map of the SPI NOR
 * @addr:       offset in the serial flash memory
 *
 * Return: a pointer to the spi_nor_erase_region struct, ERR_PTR(-errno)
 *         otherwise.
 */
static struct spi_nor_erase_region *
spi_nor_find_erase_region(const struct spi_nor_erase_map *map, u64 addr)
{
        struct spi_nor_erase_region *region = map->regions;
        u64 region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
        u64 region_end = region_start + region->size;

        /* Walk the region array until @addr falls inside one of them. */
        while (addr < region_start || addr >= region_end) {
                region = spi_nor_region_next(region);
                if (!region)
                        return ERR_PTR(-EINVAL);

                region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
                region_end = region_start + region->size;
        }

        return region;
}

/**
 * spi_nor_init_erase_cmd() - initialize an erase command
 * @region:     pointer to a structure that describes a SPI NOR erase region
 * @erase:      pointer to a structure that describes a SPI NOR erase type
 *
 * Return: the pointer to the allocated erase command, ERR_PTR(-errno)
 *         otherwise.
 */
static struct spi_nor_erase_command *
spi_nor_init_erase_cmd(const struct spi_nor_erase_region *region,
                       const struct spi_nor_erase_type *erase)
{
        struct spi_nor_erase_command *cmd;

        cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
        if (!cmd)
                return ERR_PTR(-ENOMEM);

        INIT_LIST_HEAD(&cmd->list);
        cmd->opcode = erase->opcode;
        cmd->count = 1;

        /* Overlaid regions are erased in one shot of the full region size. */
        if (region->offset & SNOR_OVERLAID_REGION)
                cmd->size = region->size;
        else
                cmd->size = erase->size;

        return cmd;
}

/**
 * spi_nor_destroy_erase_cmd_list() - destroy erase command list
 * @erase_list: list of erase commands
 */
static void spi_nor_destroy_erase_cmd_list(struct list_head *erase_list)
{
        struct spi_nor_erase_command *cmd, *next;

        list_for_each_entry_safe(cmd, next, erase_list, list) {
                list_del(&cmd->list);
                kfree(cmd);
        }
}

/**
 * spi_nor_init_erase_cmd_list() - initialize erase command list
 * @nor:        pointer to a 'struct spi_nor'
 * @erase_list: list of erase commands to be executed once we validate that the
 *              erase can be performed
 * @addr:       offset in the serial flash memory
 * @len:        number of bytes to erase
 *
 * Builds the list of best fitted erase commands and verifies if the erase can
 * be performed.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_init_erase_cmd_list(struct spi_nor *nor,
                                       struct list_head *erase_list,
                                       u64 addr, u32 len)
{
        const struct spi_nor_erase_map *map = &nor->params->erase_map;
        const struct spi_nor_erase_type *erase, *prev_erase = NULL;
        struct spi_nor_erase_region *region;
        struct spi_nor_erase_command *cmd = NULL;
        u64 region_end;
        int ret = -EINVAL;

        region = spi_nor_find_erase_region(map, addr);
        if (IS_ERR(region))
                return PTR_ERR(region);

        region_end = spi_nor_region_end(region);

        while (len) {
                erase = spi_nor_find_best_erase_type(map, region, addr, len);
                if (!erase)
                        goto destroy_erase_cmd_list;

                /*
                 * Allocate a new command when the erase type changes,
                 * otherwise just bump the repeat count of the current one.
                 * NB: cmd is never dereferenced while still NULL because
                 * prev_erase != erase short-circuits on the first iteration.
                 */
                if (prev_erase != erase ||
                    erase->size != cmd->size ||
                    region->offset & SNOR_OVERLAID_REGION) {
                        cmd = spi_nor_init_erase_cmd(region, erase);
                        if (IS_ERR(cmd)) {
                                ret = PTR_ERR(cmd);
                                goto destroy_erase_cmd_list;
                        }

                        list_add_tail(&cmd->list, erase_list);
                } else {
                        cmd->count++;
                }

                addr += cmd->size;
                len -= cmd->size;

                /* Cross into the next region when this one is consumed. */
                if (len && addr >= region_end) {
                        region = spi_nor_region_next(region);
                        if (!region)
                                goto destroy_erase_cmd_list;
                        region_end = spi_nor_region_end(region);
                }

                prev_erase = erase;
        }

        return 0;

destroy_erase_cmd_list:
        spi_nor_destroy_erase_cmd_list(erase_list);
        return ret;
}

/**
 * spi_nor_erase_multi_sectors() - perform a non-uniform erase
 * @nor:        pointer to a 'struct spi_nor'
 * @addr:       offset in the serial flash memory
 * @len:        number of bytes to erase
 *
 * Build a list of best fitted erase commands and execute it once we validate
 * that the erase can be performed.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_erase_multi_sectors(struct spi_nor *nor, u64 addr, u32 len)
{
        LIST_HEAD(erase_list);
        struct spi_nor_erase_command *cmd, *next;
        int ret;

        ret = spi_nor_init_erase_cmd_list(nor, &erase_list, addr, len);
        if (ret)
                return ret;

        list_for_each_entry_safe(cmd, next, &erase_list, list) {
                nor->erase_opcode = cmd->opcode;
                while (cmd->count) {
                        dev_vdbg(nor->dev, "erase_cmd->size = 0x%08x, erase_cmd->opcode = 0x%02x, erase_cmd->count = %u\n",
                                 cmd->size, cmd->opcode, cmd->count);

                        ret = spi_nor_write_enable(nor);
                        if (ret)
                                goto destroy_erase_cmd_list;

                        ret = spi_nor_erase_sector(nor, addr);
                        if (ret)
                                goto destroy_erase_cmd_list;

                        /* The flash is busy until the sector erase finishes. */
                        ret = spi_nor_wait_till_ready(nor);
                        if (ret)
                                goto destroy_erase_cmd_list;

                        addr += cmd->size;
                        cmd->count--;
                }
                list_del(&cmd->list);
                kfree(cmd);
        }

        return 0;

destroy_erase_cmd_list:
        spi_nor_destroy_erase_cmd_list(&erase_list);
        return ret;
}

/*
 * Erase an address range on the nor chip. The address range may extend
 * one or more erase sectors. Return an error if there is a problem erasing.
1645 */ 1646 static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr) 1647 { 1648 struct spi_nor *nor = mtd_to_spi_nor(mtd); 1649 u32 addr, len; 1650 uint32_t rem; 1651 int ret; 1652 1653 dev_dbg(nor->dev, "at 0x%llx, len %lld\n", (long long)instr->addr, 1654 (long long)instr->len); 1655 1656 if (spi_nor_has_uniform_erase(nor)) { 1657 div_u64_rem(instr->len, mtd->erasesize, &rem); 1658 if (rem) 1659 return -EINVAL; 1660 } 1661 1662 addr = instr->addr; 1663 len = instr->len; 1664 1665 ret = spi_nor_lock_and_prep(nor); 1666 if (ret) 1667 return ret; 1668 1669 /* whole-chip erase? */ 1670 if (len == mtd->size && !(nor->flags & SNOR_F_NO_OP_CHIP_ERASE)) { 1671 unsigned long timeout; 1672 1673 ret = spi_nor_write_enable(nor); 1674 if (ret) 1675 goto erase_err; 1676 1677 ret = spi_nor_erase_chip(nor); 1678 if (ret) 1679 goto erase_err; 1680 1681 /* 1682 * Scale the timeout linearly with the size of the flash, with 1683 * a minimum calibrated to an old 2MB flash. We could try to 1684 * pull these from CFI/SFDP, but these values should be good 1685 * enough for now. 1686 */ 1687 timeout = max(CHIP_ERASE_2MB_READY_WAIT_JIFFIES, 1688 CHIP_ERASE_2MB_READY_WAIT_JIFFIES * 1689 (unsigned long)(mtd->size / SZ_2M)); 1690 ret = spi_nor_wait_till_ready_with_timeout(nor, timeout); 1691 if (ret) 1692 goto erase_err; 1693 1694 /* REVISIT in some cases we could speed up erasing large regions 1695 * by using SPINOR_OP_SE instead of SPINOR_OP_BE_4K. We may have set up 1696 * to use "small sector erase", but that's not always optimal. 
1697 */ 1698 1699 /* "sector"-at-a-time erase */ 1700 } else if (spi_nor_has_uniform_erase(nor)) { 1701 while (len) { 1702 ret = spi_nor_write_enable(nor); 1703 if (ret) 1704 goto erase_err; 1705 1706 ret = spi_nor_erase_sector(nor, addr); 1707 if (ret) 1708 goto erase_err; 1709 1710 ret = spi_nor_wait_till_ready(nor); 1711 if (ret) 1712 goto erase_err; 1713 1714 addr += mtd->erasesize; 1715 len -= mtd->erasesize; 1716 } 1717 1718 /* erase multiple sectors */ 1719 } else { 1720 ret = spi_nor_erase_multi_sectors(nor, addr, len); 1721 if (ret) 1722 goto erase_err; 1723 } 1724 1725 ret = spi_nor_write_disable(nor); 1726 1727 erase_err: 1728 spi_nor_unlock_and_unprep(nor); 1729 1730 return ret; 1731 } 1732 1733 /** 1734 * spi_nor_sr1_bit6_quad_enable() - Set the Quad Enable BIT(6) in the Status 1735 * Register 1. 1736 * @nor: pointer to a 'struct spi_nor' 1737 * 1738 * Bit 6 of the Status Register 1 is the QE bit for Macronix like QSPI memories. 1739 * 1740 * Return: 0 on success, -errno otherwise. 1741 */ 1742 int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor) 1743 { 1744 int ret; 1745 1746 ret = spi_nor_read_sr(nor, nor->bouncebuf); 1747 if (ret) 1748 return ret; 1749 1750 if (nor->bouncebuf[0] & SR1_QUAD_EN_BIT6) 1751 return 0; 1752 1753 nor->bouncebuf[0] |= SR1_QUAD_EN_BIT6; 1754 1755 return spi_nor_write_sr1_and_check(nor, nor->bouncebuf[0]); 1756 } 1757 1758 /** 1759 * spi_nor_sr2_bit1_quad_enable() - set the Quad Enable BIT(1) in the Status 1760 * Register 2. 1761 * @nor: pointer to a 'struct spi_nor'. 1762 * 1763 * Bit 1 of the Status Register 2 is the QE bit for Spansion like QSPI memories. 1764 * 1765 * Return: 0 on success, -errno otherwise. 
 */
int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor)
{
        int ret;

        /* When CR cannot be read, write the QE bit blindly and verify. */
        if (nor->flags & SNOR_F_NO_READ_CR)
                return spi_nor_write_16bit_cr_and_check(nor, SR2_QUAD_EN_BIT1);

        ret = spi_nor_read_cr(nor, nor->bouncebuf);
        if (ret)
                return ret;

        if (nor->bouncebuf[0] & SR2_QUAD_EN_BIT1)
                return 0;

        nor->bouncebuf[0] |= SR2_QUAD_EN_BIT1;

        return spi_nor_write_16bit_cr_and_check(nor, nor->bouncebuf[0]);
}

/**
 * spi_nor_sr2_bit7_quad_enable() - set QE bit in Status Register 2.
 * @nor:        pointer to a 'struct spi_nor'
 *
 * Set the Quad Enable (QE) bit in the Status Register 2.
 *
 * This is one of the procedures to set the QE bit described in the SFDP
 * (JESD216 rev B) specification but no manufacturer using this procedure has
 * been identified yet, hence the name of the function.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor)
{
        u8 *sr2 = nor->bouncebuf;
        int ret;
        u8 sr2_written;

        /* Check current Quad Enable bit value. */
        ret = spi_nor_read_sr2(nor, sr2);
        if (ret)
                return ret;
        if (*sr2 & SR2_QUAD_EN_BIT7)
                return 0;

        /* Update the Quad Enable bit. */
        *sr2 |= SR2_QUAD_EN_BIT7;

        ret = spi_nor_write_sr2(nor, sr2);
        if (ret)
                return ret;

        sr2_written = *sr2;

        /* Read back and check it. */
        ret = spi_nor_read_sr2(nor, sr2);
        if (ret)
                return ret;

        if (*sr2 != sr2_written) {
                dev_dbg(nor->dev, "SR2: Read back test failed\n");
                return -EIO;
        }

        return 0;
}

/* All manufacturer drivers known to the core, probed in order by JEDEC ID. */
static const struct spi_nor_manufacturer *manufacturers[] = {
        &spi_nor_atmel,
        &spi_nor_catalyst,
        &spi_nor_eon,
        &spi_nor_esmt,
        &spi_nor_everspin,
        &spi_nor_fujitsu,
        &spi_nor_gigadevice,
        &spi_nor_intel,
        &spi_nor_issi,
        &spi_nor_macronix,
        &spi_nor_micron,
        &spi_nor_st,
        &spi_nor_spansion,
        &spi_nor_sst,
        &spi_nor_winbond,
        &spi_nor_xilinx,
        &spi_nor_xmc,
};

/* Find a part whose JEDEC ID prefix matches @id; NULL when none does. */
static const struct flash_info *
spi_nor_search_part_by_id(const struct flash_info *parts, unsigned int nparts,
                          const u8 *id)
{
        unsigned int i;

        for (i = 0; i < nparts; i++) {
                if (parts[i].id_len &&
                    !memcmp(parts[i].id, id, parts[i].id_len))
                        return &parts[i];
        }

        return NULL;
}

/* Read the JEDEC ID and look the part up in the manufacturer tables. */
static const struct flash_info *spi_nor_read_id(struct spi_nor *nor)
{
        const struct flash_info *info;
        u8 *id = nor->bouncebuf;
        unsigned int i;
        int ret;

        if (nor->spimem) {
                struct spi_mem_op op =
                        SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDID, 1),
                                   SPI_MEM_OP_NO_ADDR,
                                   SPI_MEM_OP_NO_DUMMY,
                                   SPI_MEM_OP_DATA_IN(SPI_NOR_MAX_ID_LEN, id, 1));

                ret = spi_mem_exec_op(nor->spimem, &op);
        } else {
                ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDID, id,
                                                    SPI_NOR_MAX_ID_LEN);
        }
        if (ret) {
                dev_dbg(nor->dev, "error %d reading JEDEC ID\n", ret);
                return ERR_PTR(ret);
        }

        for (i = 0; i < ARRAY_SIZE(manufacturers); i++) {
                info = spi_nor_search_part_by_id(manufacturers[i]->parts,
                                                 manufacturers[i]->nparts,
                                                 id);
                if (info) {
                        nor->manufacturer = manufacturers[i];
                        return info;
                }
        }

        dev_err(nor->dev, "unrecognized JEDEC id bytes: %*ph\n",
                SPI_NOR_MAX_ID_LEN, id);
        return ERR_PTR(-ENODEV);
}

/* MTD ->_read() callback: read @len bytes at @from into @buf. */
static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
                        size_t *retlen, u_char *buf)
{
        struct spi_nor *nor = mtd_to_spi_nor(mtd);
        ssize_t ret;

        dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len);

        ret = spi_nor_lock_and_prep(nor);
        if (ret)
                return ret;

        while (len) {
                loff_t addr = from;

                addr = spi_nor_convert_addr(nor, addr);

                ret = spi_nor_read_data(nor, addr, len, buf);
                if (ret == 0) {
                        /* We shouldn't see 0-length reads */
                        ret = -EIO;
                        goto read_err;
                }
                if (ret < 0)
                        goto read_err;

                /* Partial reads are fine: advance and loop for the rest. */
                WARN_ON(ret > len);
                *retlen += ret;
                buf += ret;
                from += ret;
                len -= ret;
        }
        ret = 0;

read_err:
        spi_nor_unlock_and_unprep(nor);
        return ret;
}

/*
 * Write an address range to the nor chip. Data must be written in
 * FLASH_PAGESIZE chunks. The address range may be any size provided
 * it is within the physical boundaries.
 */
static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
                         size_t *retlen, const u_char *buf)
{
        struct spi_nor *nor = mtd_to_spi_nor(mtd);
        size_t page_offset, page_remain, i;
        ssize_t ret;

        dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);

        ret = spi_nor_lock_and_prep(nor);
        if (ret)
                return ret;

        for (i = 0; i < len; ) {
                ssize_t written;
                loff_t addr = to + i;

                /*
                 * If page_size is a power of two, the offset can be quickly
                 * calculated with an AND operation. On the other cases we
                 * need to do a modulus operation (more expensive).
                 */
                if (is_power_of_2(nor->page_size)) {
                        page_offset = addr & (nor->page_size - 1);
                } else {
                        uint64_t aux = addr;

                        /* do_div() updates aux and returns the remainder. */
                        page_offset = do_div(aux, nor->page_size);
                }
                /* the size of data remaining on the first page */
                page_remain = min_t(size_t,
                                    nor->page_size - page_offset, len - i);

                addr = spi_nor_convert_addr(nor, addr);

                ret = spi_nor_write_enable(nor);
                if (ret)
                        goto write_err;

                ret = spi_nor_write_data(nor, addr, page_remain, buf + i);
                if (ret < 0)
                        goto write_err;
                written = ret;

                ret = spi_nor_wait_till_ready(nor);
                if (ret)
                        goto write_err;
                *retlen += written;
                i += written;
        }

write_err:
        spi_nor_unlock_and_unprep(nor);
        return ret;
}

/* Sanity-check that the caller filled in the mandatory spi_nor fields. */
static int spi_nor_check(struct spi_nor *nor)
{
        if (!nor->dev ||
            (!nor->spimem && !nor->controller_ops) ||
            (!nor->spimem && nor->controller_ops &&
             (!nor->controller_ops->read ||
              !nor->controller_ops->write ||
              !nor->controller_ops->read_reg ||
              !nor->controller_ops->write_reg))) {
                pr_err("spi-nor: please fill all the necessary fields!\n");
                return -EINVAL;
        }

        if (nor->spimem && nor->controller_ops) {
                dev_err(nor->dev, "nor->spimem and nor->controller_ops are mutually exclusive, please set just one of them.\n");
                return -EINVAL;
        }

        return 0;
}

/* Fill in a read command descriptor. */
void
spi_nor_set_read_settings(struct spi_nor_read_command *read,
                          u8 num_mode_clocks,
                          u8 num_wait_states,
                          u8 opcode,
                          enum spi_nor_protocol proto)
{
        read->num_mode_clocks = num_mode_clocks;
        read->num_wait_states = num_wait_states;
        read->opcode = opcode;
        read->proto = proto;
}

/* Fill in a page program command descriptor. */
void spi_nor_set_pp_settings(struct spi_nor_pp_command *pp, u8 opcode,
                             enum spi_nor_protocol proto)
{
        pp->opcode = opcode;
        pp->proto = proto;
}

/* Map a single SNOR_HWCAPS_* bit to its SNOR_CMD_* index via @table. */
static int spi_nor_hwcaps2cmd(u32 hwcaps, const int table[][2], size_t size)
{
        size_t i;

        for (i = 0; i < size; i++)
                if (table[i][0] == (int)hwcaps)
                        return table[i][1];

        return -EINVAL;
}

/* Map one SNOR_HWCAPS_READ* bit to its SNOR_CMD_READ* index, or -EINVAL. */
int spi_nor_hwcaps_read2cmd(u32 hwcaps)
{
        static const int hwcaps_read2cmd[][2] = {
                { SNOR_HWCAPS_READ,             SNOR_CMD_READ },
                { SNOR_HWCAPS_READ_FAST,        SNOR_CMD_READ_FAST },
                { SNOR_HWCAPS_READ_1_1_1_DTR,   SNOR_CMD_READ_1_1_1_DTR },
                { SNOR_HWCAPS_READ_1_1_2,       SNOR_CMD_READ_1_1_2 },
                { SNOR_HWCAPS_READ_1_2_2,       SNOR_CMD_READ_1_2_2 },
                { SNOR_HWCAPS_READ_2_2_2,       SNOR_CMD_READ_2_2_2 },
                { SNOR_HWCAPS_READ_1_2_2_DTR,   SNOR_CMD_READ_1_2_2_DTR },
                { SNOR_HWCAPS_READ_1_1_4,       SNOR_CMD_READ_1_1_4 },
                { SNOR_HWCAPS_READ_1_4_4,       SNOR_CMD_READ_1_4_4 },
                { SNOR_HWCAPS_READ_4_4_4,       SNOR_CMD_READ_4_4_4 },
                { SNOR_HWCAPS_READ_1_4_4_DTR,   SNOR_CMD_READ_1_4_4_DTR },
                { SNOR_HWCAPS_READ_1_1_8,       SNOR_CMD_READ_1_1_8 },
                { SNOR_HWCAPS_READ_1_8_8,       SNOR_CMD_READ_1_8_8 },
                { SNOR_HWCAPS_READ_8_8_8,       SNOR_CMD_READ_8_8_8 },
                { SNOR_HWCAPS_READ_1_8_8_DTR,   SNOR_CMD_READ_1_8_8_DTR },
                { SNOR_HWCAPS_READ_8_8_8_DTR,   SNOR_CMD_READ_8_8_8_DTR },
        };

        return spi_nor_hwcaps2cmd(hwcaps, hwcaps_read2cmd,
                                  ARRAY_SIZE(hwcaps_read2cmd));
}

/* Map one SNOR_HWCAPS_PP* bit to its SNOR_CMD_PP* index, or -EINVAL. */
static int spi_nor_hwcaps_pp2cmd(u32 hwcaps)
{
        static const int hwcaps_pp2cmd[][2] = {
                { SNOR_HWCAPS_PP,               SNOR_CMD_PP },
                { SNOR_HWCAPS_PP_1_1_4,         SNOR_CMD_PP_1_1_4 },
                { SNOR_HWCAPS_PP_1_4_4,         SNOR_CMD_PP_1_4_4 },
                { SNOR_HWCAPS_PP_4_4_4,         SNOR_CMD_PP_4_4_4 },
                { SNOR_HWCAPS_PP_1_1_8,         SNOR_CMD_PP_1_1_8 },
                { SNOR_HWCAPS_PP_1_8_8,         SNOR_CMD_PP_1_8_8 },
                { SNOR_HWCAPS_PP_8_8_8,         SNOR_CMD_PP_8_8_8 },
                { SNOR_HWCAPS_PP_8_8_8_DTR,     SNOR_CMD_PP_8_8_8_DTR },
        };

        return spi_nor_hwcaps2cmd(hwcaps, hwcaps_pp2cmd,
                                  ARRAY_SIZE(hwcaps_pp2cmd));
}

/**
 * spi_nor_spimem_check_op - check if the
 * operation is supported
 * by controller
 * @nor:        pointer to a 'struct spi_nor'
 * @op:         pointer to op template to be checked
 *
 * Returns 0 if operation is supported, -EOPNOTSUPP otherwise.
 */
static int spi_nor_spimem_check_op(struct spi_nor *nor,
                                   struct spi_mem_op *op)
{
        /*
         * First test with 4 address bytes. The opcode itself might
         * be a 3B addressing opcode but we don't care, because
         * SPI controller implementation should not check the opcode,
         * but just the sequence.
         */
        op->addr.nbytes = 4;
        if (!spi_mem_supports_op(nor->spimem, op)) {
                if (nor->mtd.size > SZ_16M)
                        return -EOPNOTSUPP;

                /* If flash size <= 16MB, 3 address bytes are sufficient */
                op->addr.nbytes = 3;
                if (!spi_mem_supports_op(nor->spimem, op))
                        return -EOPNOTSUPP;
        }

        return 0;
}

/**
 * spi_nor_spimem_check_readop - check if the read op is supported
 * by controller
 * @nor:        pointer to a 'struct spi_nor'
 * @read:       pointer to op template to be checked
 *
 * Returns 0 if operation is supported, -EOPNOTSUPP otherwise.
 */
static int spi_nor_spimem_check_readop(struct spi_nor *nor,
                                       const struct spi_nor_read_command *read)
{
        struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(read->opcode, 0),
                                          SPI_MEM_OP_ADDR(3, 0, 0),
                                          SPI_MEM_OP_DUMMY(1, 0),
                                          SPI_MEM_OP_DATA_IN(1, NULL, 0));

        spi_nor_spimem_setup_op(nor, &op, read->proto);

        /* convert the dummy cycles to the number of bytes */
        op.dummy.nbytes = (nor->read_dummy * op.dummy.buswidth) / 8;
        if (spi_nor_protocol_is_dtr(nor->read_proto))
                op.dummy.nbytes *= 2;

        return spi_nor_spimem_check_op(nor, &op);
}

/**
 * spi_nor_spimem_check_pp - check if the page program op is supported
 * by controller
 * @nor:        pointer to a 'struct spi_nor'
 * @pp:         pointer to op template to be checked
 *
 * Returns 0 if operation is supported, -EOPNOTSUPP otherwise.
 */
static int spi_nor_spimem_check_pp(struct spi_nor *nor,
                                   const struct spi_nor_pp_command *pp)
{
        struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(pp->opcode, 0),
                                          SPI_MEM_OP_ADDR(3, 0, 0),
                                          SPI_MEM_OP_NO_DUMMY,
                                          SPI_MEM_OP_DATA_OUT(1, NULL, 0));

        spi_nor_spimem_setup_op(nor, &op, pp->proto);

        return spi_nor_spimem_check_op(nor, &op);
}

/**
 * spi_nor_spimem_adjust_hwcaps - Find optimal Read/Write protocol
 *                                based on SPI controller capabilities
 * @nor:        pointer to a 'struct spi_nor'
 * @hwcaps:     pointer to resulting capabilities after adjusting
 *              according to controller and flash's capability
 */
static void
spi_nor_spimem_adjust_hwcaps(struct spi_nor *nor, u32 *hwcaps)
{
        struct spi_nor_flash_parameter *params = nor->params;
        unsigned int cap;

        /* X-X-X modes are not supported yet, mask them all. */
        *hwcaps &= ~SNOR_HWCAPS_X_X_X;

        /*
         * If the reset line is broken, we do not want to enter a stateful
         * mode.
         */
        if (nor->flags & SNOR_F_BROKEN_RESET)
                *hwcaps &= ~(SNOR_HWCAPS_X_X_X | SNOR_HWCAPS_X_X_X_DTR);

        /* Drop every capability bit whose op the controller cannot drive. */
        for (cap = 0; cap < sizeof(*hwcaps) * BITS_PER_BYTE; cap++) {
                int rdidx, ppidx;

                if (!(*hwcaps & BIT(cap)))
                        continue;

                rdidx = spi_nor_hwcaps_read2cmd(BIT(cap));
                if (rdidx >= 0 &&
                    spi_nor_spimem_check_readop(nor, &params->reads[rdidx]))
                        *hwcaps &= ~BIT(cap);

                ppidx = spi_nor_hwcaps_pp2cmd(BIT(cap));
                if (ppidx < 0)
                        continue;

                if (spi_nor_spimem_check_pp(nor,
                                            &params->page_programs[ppidx]))
                        *hwcaps &= ~BIT(cap);
        }
}

/**
 * spi_nor_set_erase_type() - set a SPI NOR erase type
 * @erase:      pointer to a structure that describes a SPI NOR erase type
 * @size:       the size of the sector/block erased by the erase type
 * @opcode:     the SPI command op code to erase the sector/block
 */
void spi_nor_set_erase_type(struct spi_nor_erase_type *erase, u32 size,
                            u8 opcode)
{
        erase->size = size;
        erase->opcode = opcode;
        /* JEDEC JESD216B Standard imposes erase sizes to be power of 2. */
        erase->size_shift = ffs(erase->size) - 1;
        erase->size_mask = (1 << erase->size_shift) - 1;
}

/**
 * spi_nor_init_uniform_erase_map() - Initialize uniform erase map
 * @map:        the erase map of the SPI NOR
 * @erase_mask: bitmask encoding erase types that can erase the entire
 *              flash memory
 * @flash_size: the spi nor flash memory size
 */
void spi_nor_init_uniform_erase_map(struct spi_nor_erase_map *map,
                                    u8 erase_mask, u64 flash_size)
{
        /* Offset 0 with erase_mask and SNOR_LAST_REGION bit set */
        map->uniform_region.offset = (erase_mask & SNOR_ERASE_TYPE_MASK) |
                                     SNOR_LAST_REGION;
        map->uniform_region.size = flash_size;
        map->regions = &map->uniform_region;
        map->uniform_erase_type = erase_mask;
}

/* Run manufacturer then part-specific post-BFPT fixup hooks, in that order. */
int spi_nor_post_bfpt_fixups(struct spi_nor *nor,
                             const struct sfdp_parameter_header *bfpt_header,
                             const struct sfdp_bfpt *bfpt)
{
        int ret;

        if (nor->manufacturer && nor->manufacturer->fixups &&
            nor->manufacturer->fixups->post_bfpt) {
                ret = nor->manufacturer->fixups->post_bfpt(nor, bfpt_header,
                                                           bfpt);
                if (ret)
                        return ret;
        }

        if (nor->info->fixups && nor->info->fixups->post_bfpt)
                return nor->info->fixups->post_bfpt(nor, bfpt_header, bfpt);

        return 0;
}

/* Select the highest-numbered (fastest) shared read capability. */
static int spi_nor_select_read(struct spi_nor *nor,
                               u32 shared_hwcaps)
{
        int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_READ_MASK) - 1;
        const struct spi_nor_read_command *read;

        if (best_match < 0)
                return -EINVAL;

        cmd = spi_nor_hwcaps_read2cmd(BIT(best_match));
        if (cmd < 0)
                return -EINVAL;

        read = &nor->params->reads[cmd];
        nor->read_opcode = read->opcode;
        nor->read_proto = read->proto;

        /*
         * In the SPI NOR framework, we don't need to make the difference
         * between mode clock cycles and wait state clock cycles.
         * Indeed, the value of the mode clock cycles is used by a QSPI
         * flash memory to know whether it should enter or leave its 0-4-4
         * (Continuous Read / XIP) mode.
         * eXecution In Place is out of the scope of the mtd sub-system.
         * Hence we choose to merge both mode and wait state clock cycles
         * into the so called dummy clock cycles.
         */
        nor->read_dummy = read->num_mode_clocks + read->num_wait_states;
        return 0;
}

/* Select the highest-numbered shared page program capability. */
static int spi_nor_select_pp(struct spi_nor *nor,
                             u32 shared_hwcaps)
{
        int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_PP_MASK) - 1;
        const struct spi_nor_pp_command *pp;

        if (best_match < 0)
                return -EINVAL;

        cmd = spi_nor_hwcaps_pp2cmd(BIT(best_match));
        if (cmd < 0)
                return -EINVAL;

        pp = &nor->params->page_programs[cmd];
        nor->program_opcode = pp->opcode;
        nor->write_proto = pp->proto;
        return 0;
}

/**
 * spi_nor_select_uniform_erase() - select optimum uniform erase type
 * @map:        the erase map of the SPI NOR
 * @wanted_size: the erase type size to search for. Contains the value of
 *              info->sector_size or of the "small sector" size in case
 *              CONFIG_MTD_SPI_NOR_USE_4K_SECTORS is defined.
 *
 * Once the optimum uniform sector erase command is found, disable all the
 * other.
 *
 * Return: pointer to erase type on success, NULL otherwise.
 */
static const struct spi_nor_erase_type *
spi_nor_select_uniform_erase(struct spi_nor_erase_map *map,
			     const u32 wanted_size)
{
	const struct spi_nor_erase_type *tested_erase, *erase = NULL;
	int i;
	u8 uniform_erase_type = map->uniform_erase_type;

	/*
	 * Erase types are sorted in ascending size order with the smallest at
	 * BIT(0) (see spi_nor_info_init_params()), so scan from the largest
	 * one downwards.
	 */
	for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
		if (!(uniform_erase_type & BIT(i)))
			continue;

		tested_erase = &map->erase_type[i];

		/*
		 * If the current erase size is the one, stop here:
		 * we have found the right uniform Sector Erase command.
		 */
		if (tested_erase->size == wanted_size) {
			erase = tested_erase;
			break;
		}

		/*
		 * Otherwise, the current erase size is still a valid candidate.
		 * Select the biggest valid candidate.
		 */
		if (!erase && tested_erase->size)
			erase = tested_erase;
			/* keep iterating to find the wanted_size */
	}

	if (!erase)
		return NULL;

	/* Disable all other Sector Erase commands. */
	map->uniform_erase_type &= ~SNOR_ERASE_TYPE_MASK;
	map->uniform_erase_type |= BIT(erase - map->erase_type);
	return erase;
}

/*
 * Select the Sector Erase command: set nor->erase_opcode and mtd->erasesize
 * for uniform layouts, or only mtd->erasesize (largest type) for non-uniform
 * ones. Returns -EINVAL when no usable erase type exists.
 */
static int spi_nor_select_erase(struct spi_nor *nor)
{
	struct spi_nor_erase_map *map = &nor->params->erase_map;
	const struct spi_nor_erase_type *erase = NULL;
	struct mtd_info *mtd = &nor->mtd;
	u32 wanted_size = nor->info->sector_size;
	int i;

	/*
	 * The previous implementation handling Sector Erase commands assumed
	 * that the SPI flash memory has an uniform layout then used only one
	 * of the supported erase sizes for all Sector Erase commands.
	 * So to be backward compatible, the new implementation also tries to
	 * manage the SPI flash memory as uniform with a single erase sector
	 * size, when possible.
	 */
#ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS
	/* prefer "small sector" erase if possible */
	wanted_size = 4096u;
#endif

	if (spi_nor_has_uniform_erase(nor)) {
		erase = spi_nor_select_uniform_erase(map, wanted_size);
		if (!erase)
			return -EINVAL;
		nor->erase_opcode = erase->opcode;
		mtd->erasesize = erase->size;
		return 0;
	}

	/*
	 * For non-uniform SPI flash memory, set mtd->erasesize to the
	 * maximum erase sector size. No need to set nor->erase_opcode.
	 */
	for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
		if (map->erase_type[i].size) {
			erase = &map->erase_type[i];
			break;
		}
	}

	if (!erase)
		return -EINVAL;

	mtd->erasesize = erase->size;
	return 0;
}

/*
 * Default ->setup() implementation: intersect the controller and flash
 * hardware capabilities, then select the (Fast) Read, Page Program and
 * Sector Erase settings from that shared mask.
 */
static int spi_nor_default_setup(struct spi_nor *nor,
				 const struct spi_nor_hwcaps *hwcaps)
{
	struct spi_nor_flash_parameter *params = nor->params;
	u32 ignored_mask, shared_mask;
	int err;

	/*
	 * Keep only the hardware capabilities supported by both the SPI
	 * controller and the SPI flash memory.
	 */
	shared_mask = hwcaps->mask & params->hwcaps.mask;

	if (nor->spimem) {
		/*
		 * When called from spi_nor_probe(), all caps are set and we
		 * need to discard some of them based on what the SPI
		 * controller actually supports (using spi_mem_supports_op()).
		 */
		spi_nor_spimem_adjust_hwcaps(nor, &shared_mask);
	} else {
		/*
		 * SPI n-n-n protocols are not supported when the SPI
		 * controller directly implements the spi_nor interface.
		 * Yet another reason to switch to spi-mem.
		 */
		ignored_mask = SNOR_HWCAPS_X_X_X | SNOR_HWCAPS_X_X_X_DTR;
		if (shared_mask & ignored_mask) {
			dev_dbg(nor->dev,
				"SPI n-n-n protocols are not supported.\n");
			shared_mask &= ~ignored_mask;
		}
	}

	/* Select the (Fast) Read command.
	 */
	err = spi_nor_select_read(nor, shared_mask);
	if (err) {
		dev_dbg(nor->dev,
			"can't select read settings supported by both the SPI controller and memory.\n");
		return err;
	}

	/* Select the Page Program command. */
	err = spi_nor_select_pp(nor, shared_mask);
	if (err) {
		dev_dbg(nor->dev,
			"can't select write settings supported by both the SPI controller and memory.\n");
		return err;
	}

	/* Select the Sector Erase command. */
	err = spi_nor_select_erase(nor);
	if (err) {
		dev_dbg(nor->dev,
			"can't select erase settings supported by both the SPI controller and memory.\n");
		return err;
	}

	return 0;
}

/*
 * Run the params->setup() hook when one is installed. The default is
 * spi_nor_default_setup() (set in spi_nor_info_init_params()), but fixups
 * may have replaced it.
 */
static int spi_nor_setup(struct spi_nor *nor,
			 const struct spi_nor_hwcaps *hwcaps)
{
	if (!nor->params->setup)
		return 0;

	return nor->params->setup(nor, hwcaps);
}

/**
 * spi_nor_manufacturer_init_params() - Initialize the flash's parameters and
 * settings based on MFR register and ->default_init() hook.
 * @nor:	pointer to a 'struct spi_nor'.
 */
static void spi_nor_manufacturer_init_params(struct spi_nor *nor)
{
	if (nor->manufacturer && nor->manufacturer->fixups &&
	    nor->manufacturer->fixups->default_init)
		nor->manufacturer->fixups->default_init(nor);

	/* The flash-specific hook runs last so it can override the above. */
	if (nor->info->fixups && nor->info->fixups->default_init)
		nor->info->fixups->default_init(nor);
}

/**
 * spi_nor_sfdp_init_params() - Initialize the flash's parameters and settings
 * based on JESD216 SFDP standard.
 * @nor:	pointer to a 'struct spi_nor'.
 *
 * The method has a roll-back mechanism: in case the SFDP parsing fails, the
 * legacy flash parameters and settings will be restored.
 */
static void spi_nor_sfdp_init_params(struct spi_nor *nor)
{
	struct spi_nor_flash_parameter sfdp_params;

	/* Snapshot the legacy parameters so they can be restored on failure. */
	memcpy(&sfdp_params, nor->params, sizeof(sfdp_params));

	if (spi_nor_parse_sfdp(nor)) {
		/* Roll back everything a failed parse may have touched. */
		memcpy(nor->params, &sfdp_params, sizeof(*nor->params));
		nor->addr_width = 0;
		nor->flags &= ~SNOR_F_4B_OPCODES;
	}
}

/**
 * spi_nor_info_init_params() - Initialize the flash's parameters and settings
 * based on nor->info data.
 * @nor:	pointer to a 'struct spi_nor'.
 */
static void spi_nor_info_init_params(struct spi_nor *nor)
{
	struct spi_nor_flash_parameter *params = nor->params;
	struct spi_nor_erase_map *map = &params->erase_map;
	const struct flash_info *info = nor->info;
	struct device_node *np = spi_nor_get_flash_node(nor);
	u8 i, erase_mask;

	/* Initialize default flash parameters and settings. */
	params->quad_enable = spi_nor_sr2_bit1_quad_enable;
	params->set_4byte_addr_mode = spansion_set_4byte_addr_mode;
	params->setup = spi_nor_default_setup;
	params->otp.org = &info->otp_org;

	/* Default to 16-bit Write Status (01h) Command */
	nor->flags |= SNOR_F_HAS_16BIT_SR;

	/* Set SPI NOR sizes. */
	params->writesize = 1;
	params->size = (u64)info->sector_size * info->n_sectors;
	params->page_size = info->page_size;

	if (!(info->flags & SPI_NOR_NO_FR)) {
		/* Default to Fast Read for DT and non-DT platform devices. */
		params->hwcaps.mask |= SNOR_HWCAPS_READ_FAST;

		/* Mask out Fast Read if not requested at DT instantiation. */
		if (np && !of_property_read_bool(np, "m25p,fast-read"))
			params->hwcaps.mask &= ~SNOR_HWCAPS_READ_FAST;
	}

	/* (Fast) Read settings.
	 */
	params->hwcaps.mask |= SNOR_HWCAPS_READ;
	spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ],
				  0, 0, SPINOR_OP_READ,
				  SNOR_PROTO_1_1_1);

	/* Fast Read: 8 dummy clock cycles. */
	if (params->hwcaps.mask & SNOR_HWCAPS_READ_FAST)
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_FAST],
					  0, 8, SPINOR_OP_READ_FAST,
					  SNOR_PROTO_1_1_1);

	if (info->flags & SPI_NOR_DUAL_READ) {
		params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2;
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_2],
					  0, 8, SPINOR_OP_READ_1_1_2,
					  SNOR_PROTO_1_1_2);
	}

	if (info->flags & SPI_NOR_QUAD_READ) {
		params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4;
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_4],
					  0, 8, SPINOR_OP_READ_1_1_4,
					  SNOR_PROTO_1_1_4);
	}

	if (info->flags & SPI_NOR_OCTAL_READ) {
		params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_8;
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_8],
					  0, 8, SPINOR_OP_READ_1_1_8,
					  SNOR_PROTO_1_1_8);
	}

	/* 8D-8D-8D: 20 dummy cycles, reusing the legacy Fast Read opcode. */
	if (info->flags & SPI_NOR_OCTAL_DTR_READ) {
		params->hwcaps.mask |= SNOR_HWCAPS_READ_8_8_8_DTR;
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_8_8_8_DTR],
					  0, 20, SPINOR_OP_READ_FAST,
					  SNOR_PROTO_8_8_8_DTR);
	}

	/* Page Program settings. */
	params->hwcaps.mask |= SNOR_HWCAPS_PP;
	spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP],
				SPINOR_OP_PP, SNOR_PROTO_1_1_1);

	if (info->flags & SPI_NOR_OCTAL_DTR_PP) {
		params->hwcaps.mask |= SNOR_HWCAPS_PP_8_8_8_DTR;
		/*
		 * Since xSPI Page Program opcode is backward compatible with
		 * Legacy SPI, use Legacy SPI opcode there as well.
		 */
		spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP_8_8_8_DTR],
					SPINOR_OP_PP, SNOR_PROTO_8_8_8_DTR);
	}

	/*
	 * Sector Erase settings. Sort Erase Types in ascending order, with the
	 * smallest erase size starting at BIT(0).
	 */
	erase_mask = 0;
	i = 0;
	if (info->flags & SECT_4K_PMC) {
		erase_mask |= BIT(i);
		spi_nor_set_erase_type(&map->erase_type[i], 4096u,
				       SPINOR_OP_BE_4K_PMC);
		i++;
	} else if (info->flags & SECT_4K) {
		erase_mask |= BIT(i);
		spi_nor_set_erase_type(&map->erase_type[i], 4096u,
				       SPINOR_OP_BE_4K);
		i++;
	}
	/* The sector-size Sector Erase type is always present. */
	erase_mask |= BIT(i);
	spi_nor_set_erase_type(&map->erase_type[i], info->sector_size,
			       SPINOR_OP_SE);
	spi_nor_init_uniform_erase_map(map, erase_mask, params->size);
}

/**
 * spi_nor_post_sfdp_fixups() - Updates the flash's parameters and settings
 * after SFDP has been parsed (is also called for SPI NORs that do not
 * support RDSFDP).
 * @nor:	pointer to a 'struct spi_nor'
 *
 * Typically used to tweak various parameters that could not be extracted by
 * other means (i.e. when information provided by the SFDP/flash_info tables
 * are incomplete or wrong).
 */
static void spi_nor_post_sfdp_fixups(struct spi_nor *nor)
{
	if (nor->manufacturer && nor->manufacturer->fixups &&
	    nor->manufacturer->fixups->post_sfdp)
		nor->manufacturer->fixups->post_sfdp(nor);

	if (nor->info->fixups && nor->info->fixups->post_sfdp)
		nor->info->fixups->post_sfdp(nor);
}

/**
 * spi_nor_late_init_params() - Late initialization of default flash parameters.
 * @nor:	pointer to a 'struct spi_nor'
 *
 * Used to set default flash parameters and settings when the ->default_init()
 * hook or the SFDP parser leave voids.
 */
static void spi_nor_late_init_params(struct spi_nor *nor)
{
	/*
	 * NOR protection support. When locking_ops are not provided, we pick
	 * the default ones.
	 */
	if (nor->flags & SNOR_F_HAS_LOCK && !nor->params->locking_ops)
		spi_nor_init_default_locking_ops(nor);
}

/**
 * spi_nor_init_params() - Initialize the flash's parameters and settings.
 * @nor:	pointer to a 'struct spi_nor'.
 *
 * The flash parameters and settings are initialized based on a sequence of
 * calls that are ordered by priority:
 *
 * 1/ Default flash parameters initialization. The initializations are done
 *    based on nor->info data:
 *		spi_nor_info_init_params()
 *
 * which can be overwritten by:
 * 2/ Manufacturer flash parameters initialization. The initializations are
 *    done based on MFR register, or when the decisions can not be done solely
 *    based on MFR, by using specific flash_info tweaks, ->default_init():
 *		spi_nor_manufacturer_init_params()
 *
 * which can be overwritten by:
 * 3/ SFDP flash parameters initialization. JESD216 SFDP is a standard and
 *    should be more accurate than the above.
 *		spi_nor_sfdp_init_params()
 *
 *    Please note that there is a ->post_bfpt() fixup hook that can overwrite
 *    the flash parameters and settings immediately after parsing the Basic
 *    Flash Parameter Table.
 *
 * which can be overwritten by:
 * 4/ Post SFDP flash parameters initialization. Used to tweak various
 *    parameters that could not be extracted by other means (i.e. when
 *    information provided by the SFDP/flash_info tables are incomplete or
 *    wrong).
 *		spi_nor_post_sfdp_fixups()
 *
 * 5/ Late default flash parameters initialization, used when the
 *    ->default_init() hook or the SFDP parser do not set specific params.
 *		spi_nor_late_init_params()
 */
static int spi_nor_init_params(struct spi_nor *nor)
{
	nor->params = devm_kzalloc(nor->dev, sizeof(*nor->params), GFP_KERNEL);
	if (!nor->params)
		return -ENOMEM;

	spi_nor_info_init_params(nor);

	spi_nor_manufacturer_init_params(nor);

	/*
	 * Only attempt the SFDP parse when a multi-I/O read mode is flagged
	 * and the flash_info entry does not explicitly opt out of SFDP.
	 */
	if ((nor->info->flags & (SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
				 SPI_NOR_OCTAL_READ | SPI_NOR_OCTAL_DTR_READ)) &&
	    !(nor->info->flags & SPI_NOR_SKIP_SFDP))
		spi_nor_sfdp_init_params(nor);

	spi_nor_post_sfdp_fixups(nor);

	spi_nor_late_init_params(nor);

	return 0;
}

/**
 * spi_nor_octal_dtr_enable() - enable Octal DTR I/O if needed
 * @nor:	pointer to a 'struct spi_nor'
 * @enable:	whether to enable or disable Octal DTR
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_octal_dtr_enable(struct spi_nor *nor, bool enable)
{
	int ret;

	if (!nor->params->octal_dtr_enable)
		return 0;

	/* Only act when both read and write use the 8D-8D-8D protocol. */
	if (!(nor->read_proto == SNOR_PROTO_8_8_8_DTR &&
	      nor->write_proto == SNOR_PROTO_8_8_8_DTR))
		return 0;

	if (!(nor->flags & SNOR_F_IO_MODE_EN_VOLATILE))
		return 0;

	ret = nor->params->octal_dtr_enable(nor, enable);
	if (ret)
		return ret;

	/* Register accesses follow the newly selected I/O mode from now on. */
	if (enable)
		nor->reg_proto = SNOR_PROTO_8_8_8_DTR;
	else
		nor->reg_proto = SNOR_PROTO_1_1_1;

	return 0;
}

/**
 * spi_nor_quad_enable() - enable Quad I/O if needed.
 * @nor:	pointer to a 'struct spi_nor'
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_quad_enable(struct spi_nor *nor)
{
	if (!nor->params->quad_enable)
		return 0;

	/* Only needed when a 4-bit-wide read or write protocol is selected. */
	if (!(spi_nor_get_protocol_width(nor->read_proto) == 4 ||
	      spi_nor_get_protocol_width(nor->write_proto) == 4))
		return 0;

	return nor->params->quad_enable(nor);
}

/*
 * Send the SPI flash commands that bring the device into its operational
 * state: Octal DTR / Quad mode enable, optional unlock of the whole array,
 * and 4-byte address mode for large flashes lacking 4B opcodes.
 */
static int spi_nor_init(struct spi_nor *nor)
{
	int err;

	err = spi_nor_octal_dtr_enable(nor, true);
	if (err) {
		dev_dbg(nor->dev, "octal mode not supported\n");
		return err;
	}

	err = spi_nor_quad_enable(nor);
	if (err) {
		dev_dbg(nor->dev, "quad mode not supported\n");
		return err;
	}

	/*
	 * Some SPI NOR flashes are write protected by default after a power-on
	 * reset cycle, in order to avoid inadvertent writes during power-up.
	 * Backward compatibility imposes to unlock the entire flash memory
	 * array at power-up by default. Depending on the kernel configuration
	 * (1) do nothing, (2) always unlock the entire flash array or (3)
	 * unlock the entire flash array only when the software write
	 * protection bits are volatile. The latter is indicated by
	 * SNOR_F_SWP_IS_VOLATILE.
	 */
	if (IS_ENABLED(CONFIG_MTD_SPI_NOR_SWP_DISABLE) ||
	    (IS_ENABLED(CONFIG_MTD_SPI_NOR_SWP_DISABLE_ON_VOLATILE) &&
	     nor->flags & SNOR_F_SWP_IS_VOLATILE))
		spi_nor_try_unlock_all(nor);

	if (nor->addr_width == 4 &&
	    nor->read_proto != SNOR_PROTO_8_8_8_DTR &&
	    !(nor->flags & SNOR_F_4B_OPCODES)) {
		/*
		 * If the RESET# pin isn't hooked up properly, or the system
		 * otherwise doesn't perform a reset command in the boot
		 * sequence, it's impossible to 100% protect against unexpected
		 * reboots (e.g., crashes). Warn the user (or hopefully, system
		 * designer) that this is bad.
		 */
		WARN_ONCE(nor->flags & SNOR_F_BROKEN_RESET,
			  "enabling reset hack; may not recover from unexpected reboots\n");
		nor->params->set_4byte_addr_mode(nor, true);
	}

	return 0;
}

/*
 * Issue the JEDEC Software Reset sequence: Software Reset Enable (SRSTEN)
 * followed by Software Reset (SRST), then wait for the flash to come out of
 * reset. Failures are logged but not propagated.
 */
static void spi_nor_soft_reset(struct spi_nor *nor)
{
	struct spi_mem_op op;
	int ret;

	op = (struct spi_mem_op)SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_SRSTEN, 0),
					   SPI_MEM_OP_NO_DUMMY,
					   SPI_MEM_OP_NO_ADDR,
					   SPI_MEM_OP_NO_DATA);

	spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

	ret = spi_mem_exec_op(nor->spimem, &op);
	if (ret) {
		dev_warn(nor->dev, "Software reset failed: %d\n", ret);
		return;
	}

	op = (struct spi_mem_op)SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_SRST, 0),
					   SPI_MEM_OP_NO_DUMMY,
					   SPI_MEM_OP_NO_ADDR,
					   SPI_MEM_OP_NO_DATA);

	spi_nor_spimem_setup_op(nor, &op, nor->reg_proto);

	ret = spi_mem_exec_op(nor->spimem, &op);
	if (ret) {
		dev_warn(nor->dev, "Software reset failed: %d\n", ret);
		return;
	}

	/*
	 * Software Reset is not instant, and the delay varies from flash to
	 * flash. Looking at a few flashes, most range somewhere below 100
	 * microseconds. So, sleep for a range of 200-400 us.
	 */
	usleep_range(SPI_NOR_SRST_SLEEP_MIN, SPI_NOR_SRST_SLEEP_MAX);
}

/* mtd suspend handler */
static int spi_nor_suspend(struct mtd_info *mtd)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	int ret;

	/* Disable octal DTR mode if we enabled it.
	 */
	ret = spi_nor_octal_dtr_enable(nor, false);
	if (ret)
		dev_err(nor->dev, "suspend() failed\n");

	return ret;
}

/* mtd resume handler */
static void spi_nor_resume(struct mtd_info *mtd)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	struct device *dev = nor->dev;
	int ret;

	/* re-initialize the nor chip */
	ret = spi_nor_init(nor);
	if (ret)
		dev_err(dev, "resume() failed\n");
}

/*
 * Pin the module that drives the underlying device (the spi-mem controller's
 * parent when available, the flash device otherwise) so it cannot be
 * unloaded while the MTD is in use.
 */
static int spi_nor_get_device(struct mtd_info *mtd)
{
	struct mtd_info *master = mtd_get_master(mtd);
	struct spi_nor *nor = mtd_to_spi_nor(master);
	struct device *dev;

	if (nor->spimem)
		dev = nor->spimem->spi->controller->dev.parent;
	else
		dev = nor->dev;

	if (!try_module_get(dev->driver->owner))
		return -ENODEV;

	return 0;
}

/* Drop the module reference taken in spi_nor_get_device(). */
static void spi_nor_put_device(struct mtd_info *mtd)
{
	struct mtd_info *master = mtd_get_master(mtd);
	struct spi_nor *nor = mtd_to_spi_nor(master);
	struct device *dev;

	if (nor->spimem)
		dev = nor->spimem->spi->controller->dev.parent;
	else
		dev = nor->dev;

	module_put(dev->driver->owner);
}

/*
 * Put the flash back into a state a subsequent boot expects: leave 4-byte
 * address mode when the reset line is broken (SNOR_F_BROKEN_RESET), and
 * issue a Software Reset when SNOR_F_SOFT_RESET is set.
 */
void spi_nor_restore(struct spi_nor *nor)
{
	/* restore the addressing mode */
	if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES) &&
	    nor->flags & SNOR_F_BROKEN_RESET)
		nor->params->set_4byte_addr_mode(nor, false);

	if (nor->flags & SNOR_F_SOFT_RESET)
		spi_nor_soft_reset(nor);
}
EXPORT_SYMBOL_GPL(spi_nor_restore);

/*
 * Look up @name in every registered manufacturer's parts table. On a match,
 * record the manufacturer in nor->manufacturer and return the flash_info
 * entry; return NULL when nothing matches.
 */
static const struct flash_info *spi_nor_match_id(struct spi_nor *nor,
						 const char *name)
{
	unsigned int i, j;

	for (i = 0; i < ARRAY_SIZE(manufacturers); i++) {
		for (j = 0; j < manufacturers[i]->nparts; j++) {
			if (!strcmp(name, manufacturers[i]->parts[j].name)) {
				nor->manufacturer = manufacturers[i];
				return &manufacturers[i]->parts[j];
			}
		}
	}

	return NULL;
}

/*
 * Decide the address width (3 or 4 bytes): an SFDP-provided value wins,
 * then the 8D-8D-8D constraint, then flash_info, and finally a 3-byte
 * default that is bumped to 4 for devices larger than 16 MiB.
 */
static int spi_nor_set_addr_width(struct spi_nor *nor)
{
	if (nor->addr_width) {
		/* already configured from SFDP */
	} else if (nor->read_proto == SNOR_PROTO_8_8_8_DTR) {
		/*
		 * In 8D-8D-8D mode, one byte takes half a cycle to transfer. So
		 * in this protocol an odd address width cannot be used because
		 * then the address phase would only span a cycle and a half.
		 * Half a cycle would be left over. We would then have to start
		 * the dummy phase in the middle of a cycle and so too the data
		 * phase, and we will end the transaction with half a cycle left
		 * over.
		 *
		 * Force all 8D-8D-8D flashes to use an address width of 4 to
		 * avoid this situation.
		 */
		nor->addr_width = 4;
	} else if (nor->info->addr_width) {
		nor->addr_width = nor->info->addr_width;
	} else {
		nor->addr_width = 3;
	}

	if (nor->addr_width == 3 && nor->mtd.size > 0x1000000) {
		/* enable 4-byte addressing if the device exceeds 16MiB */
		nor->addr_width = 4;
	}

	if (nor->addr_width > SPI_NOR_MAX_ADDR_WIDTH) {
		dev_dbg(nor->dev, "address width is too large: %u\n",
			nor->addr_width);
		return -EINVAL;
	}

	/* Set 4byte opcodes when possible.
	 */
	if (nor->addr_width == 4 && nor->flags & SNOR_F_4B_OPCODES &&
	    !(nor->flags & SNOR_F_HAS_4BAIT))
		spi_nor_set_4byte_opcodes(nor);

	return 0;
}

/* Expose the part name and JEDEC ID via the MTD debugfs fields. */
static void spi_nor_debugfs_init(struct spi_nor *nor,
				 const struct flash_info *info)
{
	struct mtd_info *mtd = &nor->mtd;

	mtd->dbg.partname = info->name;
	mtd->dbg.partid = devm_kasprintf(nor->dev, GFP_KERNEL, "spi-nor:%*phN",
					 info->id_len, info->id);
}

/*
 * Resolve the flash_info entry for this device: by @name when given, else by
 * the JEDEC READ ID; when both are available and disagree, the JEDEC answer
 * takes precedence.
 */
static const struct flash_info *spi_nor_get_flash_info(struct spi_nor *nor,
						       const char *name)
{
	const struct flash_info *info = NULL;

	if (name)
		info = spi_nor_match_id(nor, name);
	/* Try to auto-detect if chip name wasn't specified or not found */
	if (!info)
		info = spi_nor_read_id(nor);
	if (IS_ERR_OR_NULL(info))
		return ERR_PTR(-ENOENT);

	/*
	 * If caller has specified name of flash model that can normally be
	 * detected using JEDEC, let's verify it.
	 */
	if (name && info->id_len) {
		const struct flash_info *jinfo;

		jinfo = spi_nor_read_id(nor);
		if (IS_ERR(jinfo)) {
			return jinfo;
		} else if (jinfo != info) {
			/*
			 * JEDEC knows better, so overwrite platform ID. We
			 * can't trust partitions any longer, but we'll let
			 * mtd apply them anyway, since some partitions may be
			 * marked read-only, and we don't want to lose that
			 * information, even if it's not 100% accurate.
			 */
			dev_warn(nor->dev, "found %s, expected %s\n",
				 jinfo->name, info->name);
			info = jinfo;
		}
	}

	return info;
}

/*
 * Main entry point for SPI NOR detection and setup: identify the flash,
 * initialize nor->params, fill in the MTD callbacks and flags, select
 * opcodes/protocols and finally initialize the chip. @name may be NULL to
 * rely purely on JEDEC auto-detection.
 */
int spi_nor_scan(struct spi_nor *nor, const char *name,
		 const struct spi_nor_hwcaps *hwcaps)
{
	const struct flash_info *info;
	struct device *dev = nor->dev;
	struct mtd_info *mtd = &nor->mtd;
	struct device_node *np = spi_nor_get_flash_node(nor);
	int ret;
	int i;

	ret = spi_nor_check(nor);
	if (ret)
		return ret;

	/* Reset SPI protocol for all commands. */
	nor->reg_proto = SNOR_PROTO_1_1_1;
	nor->read_proto = SNOR_PROTO_1_1_1;
	nor->write_proto = SNOR_PROTO_1_1_1;

	/*
	 * We need the bounce buffer early to read/write registers when going
	 * through the spi-mem layer (buffers have to be DMA-able).
	 * For spi-mem drivers, we'll reallocate a new buffer if
	 * nor->page_size turns out to be greater than PAGE_SIZE (which
	 * shouldn't happen before long since NOR pages are usually less
	 * than 1KB) after spi_nor_scan() returns.
	 */
	nor->bouncebuf_size = PAGE_SIZE;
	nor->bouncebuf = devm_kmalloc(dev, nor->bouncebuf_size,
				      GFP_KERNEL);
	if (!nor->bouncebuf)
		return -ENOMEM;

	info = spi_nor_get_flash_info(nor, name);
	if (IS_ERR(info))
		return PTR_ERR(info);

	nor->info = info;

	spi_nor_debugfs_init(nor, info);

	mutex_init(&nor->lock);

	/*
	 * Make sure the XSR_RDY flag is set before calling
	 * spi_nor_wait_till_ready(). Xilinx S3AN share MFR
	 * with Atmel SPI NOR.
	 */
	if (info->flags & SPI_NOR_XSR_RDY)
		nor->flags |= SNOR_F_READY_XSR_RDY;

	if (info->flags & SPI_NOR_HAS_LOCK)
		nor->flags |= SNOR_F_HAS_LOCK;

	mtd->_write = spi_nor_write;

	/* Init flash parameters based on flash_info struct and SFDP */
	ret = spi_nor_init_params(nor);
	if (ret)
		return ret;

	if (!mtd->name)
		mtd->name = dev_name(dev);
	mtd->priv = nor;
	mtd->type = MTD_NORFLASH;
	mtd->writesize = nor->params->writesize;
	mtd->flags = MTD_CAP_NORFLASH;
	mtd->size = nor->params->size;
	mtd->_erase = spi_nor_erase;
	mtd->_read = spi_nor_read;
	mtd->_suspend = spi_nor_suspend;
	mtd->_resume = spi_nor_resume;
	mtd->_get_device = spi_nor_get_device;
	mtd->_put_device = spi_nor_put_device;

	/* Translate flash_info flags into runtime SNOR_F_* flags. */
	if (info->flags & USE_FSR)
		nor->flags |= SNOR_F_USE_FSR;
	if (info->flags & SPI_NOR_HAS_TB) {
		nor->flags |= SNOR_F_HAS_SR_TB;
		if (info->flags & SPI_NOR_TB_SR_BIT6)
			nor->flags |= SNOR_F_HAS_SR_TB_BIT6;
	}

	if (info->flags & NO_CHIP_ERASE)
		nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
	if (info->flags & USE_CLSR)
		nor->flags |= SNOR_F_USE_CLSR;
	if (info->flags & SPI_NOR_SWP_IS_VOLATILE)
		nor->flags |= SNOR_F_SWP_IS_VOLATILE;

	if (info->flags & SPI_NOR_4BIT_BP) {
		nor->flags |= SNOR_F_HAS_4BIT_BP;
		if (info->flags & SPI_NOR_BP3_SR_BIT6)
			nor->flags |= SNOR_F_HAS_SR_BP3_BIT6;
	}

	if (info->flags & SPI_NOR_NO_ERASE)
		mtd->flags |= MTD_NO_ERASE;

	mtd->dev.parent = dev;
	nor->page_size = nor->params->page_size;
	mtd->writebufsize = nor->page_size;

	if (of_property_read_bool(np, "broken-flash-reset"))
		nor->flags |= SNOR_F_BROKEN_RESET;

	/*
	 * Configure the SPI memory:
	 * - select op codes for (Fast) Read, Page Program and Sector Erase.
	 * - set the number of dummy cycles (mode cycles + wait states).
	 * - set the SPI protocols for register and memory accesses.
	 */
	ret = spi_nor_setup(nor, hwcaps);
	if (ret)
		return ret;

	if (info->flags & SPI_NOR_4B_OPCODES)
		nor->flags |= SNOR_F_4B_OPCODES;

	if (info->flags & SPI_NOR_IO_MODE_EN_VOLATILE)
		nor->flags |= SNOR_F_IO_MODE_EN_VOLATILE;

	ret = spi_nor_set_addr_width(nor);
	if (ret)
		return ret;

	spi_nor_register_locking_ops(nor);

	/* Send all the required SPI flash commands to initialize device */
	ret = spi_nor_init(nor);
	if (ret)
		return ret;

	/* Configure OTP parameters and ops */
	spi_nor_otp_init(nor);

	dev_info(dev, "%s (%lld Kbytes)\n", info->name,
		 (long long)mtd->size >> 10);

	dev_dbg(dev,
		"mtd .name = %s, .size = 0x%llx (%lldMiB), "
		".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
		mtd->name, (long long)mtd->size, (long long)(mtd->size >> 20),
		mtd->erasesize, mtd->erasesize / 1024, mtd->numeraseregions);

	if (mtd->numeraseregions)
		for (i = 0; i < mtd->numeraseregions; i++)
			dev_dbg(dev,
				"mtd.eraseregions[%d] = { .offset = 0x%llx, "
				".erasesize = 0x%.8x (%uKiB), "
				".numblocks = %d }\n",
				i, (long long)mtd->eraseregions[i].offset,
				mtd->eraseregions[i].erasesize,
				mtd->eraseregions[i].erasesize / 1024,
				mtd->eraseregions[i].numblocks);
	return 0;
}
EXPORT_SYMBOL_GPL(spi_nor_scan);

/*
 * Create a spi-mem direct-mapping descriptor covering the whole flash for
 * reads; nor->dirmap.rdesc is set on success.
 */
static int spi_nor_create_read_dirmap(struct spi_nor *nor)
{
	struct spi_mem_dirmap_info info = {
		.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 0),
				      SPI_MEM_OP_ADDR(nor->addr_width, 0, 0),
				      SPI_MEM_OP_DUMMY(nor->read_dummy, 0),
				      SPI_MEM_OP_DATA_IN(0, NULL, 0)),
		.offset = 0,
		.length = nor->mtd.size,
	};
	struct spi_mem_op *op = &info.op_tmpl;

	spi_nor_spimem_setup_op(nor, op, nor->read_proto);

	/* convert the dummy cycles to the number of
bytes */
	op->dummy.nbytes = (nor->read_dummy * op->dummy.buswidth) / 8;
	/* DTR transfers 2 bits per clock per line, so twice the bytes. */
	if (spi_nor_protocol_is_dtr(nor->read_proto))
		op->dummy.nbytes *= 2;

	/*
	 * Since spi_nor_spimem_setup_op() only sets buswidth when the number
	 * of data bytes is non-zero, the data buswidth won't be set here. So,
	 * do it explicitly.
	 */
	op->data.buswidth = spi_nor_get_protocol_data_nbits(nor->read_proto);

	nor->dirmap.rdesc = devm_spi_mem_dirmap_create(nor->dev, nor->spimem,
						       &info);
	return PTR_ERR_OR_ZERO(nor->dirmap.rdesc);
}

/*
 * Same as spi_nor_create_read_dirmap(), but for writes; sets
 * nor->dirmap.wdesc on success.
 */
static int spi_nor_create_write_dirmap(struct spi_nor *nor)
{
	struct spi_mem_dirmap_info info = {
		.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 0),
				      SPI_MEM_OP_ADDR(nor->addr_width, 0, 0),
				      SPI_MEM_OP_NO_DUMMY,
				      SPI_MEM_OP_DATA_OUT(0, NULL, 0)),
		.offset = 0,
		.length = nor->mtd.size,
	};
	struct spi_mem_op *op = &info.op_tmpl;

	/* No address phase once sst_write_second is set (SST AAI program). */
	if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
		op->addr.nbytes = 0;

	spi_nor_spimem_setup_op(nor, op, nor->write_proto);

	/*
	 * Since spi_nor_spimem_setup_op() only sets buswidth when the number
	 * of data bytes is non-zero, the data buswidth won't be set here. So,
	 * do it explicitly.
	 */
	op->data.buswidth = spi_nor_get_protocol_data_nbits(nor->write_proto);

	nor->dirmap.wdesc = devm_spi_mem_dirmap_create(nor->dev, nor->spimem,
						       &info);
	return PTR_ERR_OR_ZERO(nor->dirmap.wdesc);
}

/* spi-mem probe handler: allocate, scan and register the flash as an MTD. */
static int spi_nor_probe(struct spi_mem *spimem)
{
	struct spi_device *spi = spimem->spi;
	struct flash_platform_data *data = dev_get_platdata(&spi->dev);
	struct spi_nor *nor;
	/*
	 * Enable all caps by default. The core will mask them after
	 * checking what's really supported using spi_mem_supports_op().
	 */
	const struct spi_nor_hwcaps hwcaps = { .mask = SNOR_HWCAPS_ALL };
	char *flash_name;
	int ret;

	nor = devm_kzalloc(&spi->dev, sizeof(*nor), GFP_KERNEL);
	if (!nor)
		return -ENOMEM;

	nor->spimem = spimem;
	nor->dev = &spi->dev;
	spi_nor_set_flash_node(nor, spi->dev.of_node);

	spi_mem_set_drvdata(spimem, nor);

	if (data && data->name)
		nor->mtd.name = data->name;

	if (!nor->mtd.name)
		nor->mtd.name = spi_mem_get_name(spimem);

	/*
	 * For some (historical?) reason many platforms provide two different
	 * names in flash_platform_data: "name" and "type". Quite often name is
	 * set to "m25p80" and then "type" provides a real chip name.
	 * If that's the case, respect "type" and ignore a "name".
	 */
	if (data && data->type)
		flash_name = data->type;
	else if (!strcmp(spi->modalias, "spi-nor"))
		flash_name = NULL; /* auto-detect */
	else
		flash_name = spi->modalias;

	ret = spi_nor_scan(nor, flash_name, &hwcaps);
	if (ret)
		return ret;

	/*
	 * None of the existing parts have > 512B pages, but let's play safe
	 * and add this logic so that if anyone ever adds support for such
	 * a NOR we don't end up with buffer overflows.
	 */
	if (nor->page_size > PAGE_SIZE) {
		nor->bouncebuf_size = nor->page_size;
		devm_kfree(nor->dev, nor->bouncebuf);
		nor->bouncebuf = devm_kmalloc(nor->dev,
					      nor->bouncebuf_size,
					      GFP_KERNEL);
		if (!nor->bouncebuf)
			return -ENOMEM;
	}

	ret = spi_nor_create_read_dirmap(nor);
	if (ret)
		return ret;

	ret = spi_nor_create_write_dirmap(nor);
	if (ret)
		return ret;

	return mtd_device_register(&nor->mtd, data ? data->parts : NULL,
				   data ? data->nr_parts : 0);
}

/* spi-mem remove handler: restore the flash state, then unregister the MTD. */
static int spi_nor_remove(struct spi_mem *spimem)
{
	struct spi_nor *nor = spi_mem_get_drvdata(spimem);

	spi_nor_restore(nor);

	/* Clean up MTD stuff. */
	return mtd_device_unregister(&nor->mtd);
}

/* spi-mem shutdown handler: restore the flash state for the next boot. */
static void spi_nor_shutdown(struct spi_mem *spimem)
{
	struct spi_nor *nor = spi_mem_get_drvdata(spimem);

	spi_nor_restore(nor);
}

/*
 * Do NOT add to this array without reading the following:
 *
 * Historically, many flash devices are bound to this driver by their name. But
 * since most of these flash are compatible to some extent, and their
 * differences can often be differentiated by the JEDEC read-ID command, we
 * encourage new users to add support to the spi-nor library, and simply bind
 * against a generic string here (e.g., "jedec,spi-nor").
 *
 * Many flash names are kept here in this list (as well as in spi-nor.c) to
 * keep them available as module aliases for existing platforms.
 */
static const struct spi_device_id spi_nor_dev_ids[] = {
	/*
	 * Allow non-DT platform devices to bind to the "spi-nor" modalias, and
	 * hack around the fact that the SPI core does not provide uevent
	 * matching for .of_match_table
	 */
	{"spi-nor"},

	/*
	 * Entries not used in DTs that should be safe to drop after replacing
	 * them with "spi-nor" in platform data.
	 */
	{"s25sl064a"},	{"w25x16"},	{"m25p10"},	{"m25px64"},

	/*
	 * Entries that were used in DTs without "jedec,spi-nor" fallback and
	 * should be kept for backward compatibility.
3399 */ 3400 {"at25df321a"}, {"at25df641"}, {"at26df081a"}, 3401 {"mx25l4005a"}, {"mx25l1606e"}, {"mx25l6405d"}, {"mx25l12805d"}, 3402 {"mx25l25635e"},{"mx66l51235l"}, 3403 {"n25q064"}, {"n25q128a11"}, {"n25q128a13"}, {"n25q512a"}, 3404 {"s25fl256s1"}, {"s25fl512s"}, {"s25sl12801"}, {"s25fl008k"}, 3405 {"s25fl064k"}, 3406 {"sst25vf040b"},{"sst25vf016b"},{"sst25vf032b"},{"sst25wf040"}, 3407 {"m25p40"}, {"m25p80"}, {"m25p16"}, {"m25p32"}, 3408 {"m25p64"}, {"m25p128"}, 3409 {"w25x80"}, {"w25x32"}, {"w25q32"}, {"w25q32dw"}, 3410 {"w25q80bl"}, {"w25q128"}, {"w25q256"}, 3411 3412 /* Flashes that can't be detected using JEDEC */ 3413 {"m25p05-nonjedec"}, {"m25p10-nonjedec"}, {"m25p20-nonjedec"}, 3414 {"m25p40-nonjedec"}, {"m25p80-nonjedec"}, {"m25p16-nonjedec"}, 3415 {"m25p32-nonjedec"}, {"m25p64-nonjedec"}, {"m25p128-nonjedec"}, 3416 3417 /* Everspin MRAMs (non-JEDEC) */ 3418 { "mr25h128" }, /* 128 Kib, 40 MHz */ 3419 { "mr25h256" }, /* 256 Kib, 40 MHz */ 3420 { "mr25h10" }, /* 1 Mib, 40 MHz */ 3421 { "mr25h40" }, /* 4 Mib, 40 MHz */ 3422 3423 { }, 3424 }; 3425 MODULE_DEVICE_TABLE(spi, spi_nor_dev_ids); 3426 3427 static const struct of_device_id spi_nor_of_table[] = { 3428 /* 3429 * Generic compatibility for SPI NOR that can be identified by the 3430 * JEDEC READ ID opcode (0x9F). Use this, if possible. 3431 */ 3432 { .compatible = "jedec,spi-nor" }, 3433 { /* sentinel */ }, 3434 }; 3435 MODULE_DEVICE_TABLE(of, spi_nor_of_table); 3436 3437 /* 3438 * REVISIT: many of these chips have deep power-down modes, which 3439 * should clearly be entered on suspend() to minimize power use. 3440 * And also when they're otherwise idle... 
3441 */ 3442 static struct spi_mem_driver spi_nor_driver = { 3443 .spidrv = { 3444 .driver = { 3445 .name = "spi-nor", 3446 .of_match_table = spi_nor_of_table, 3447 }, 3448 .id_table = spi_nor_dev_ids, 3449 }, 3450 .probe = spi_nor_probe, 3451 .remove = spi_nor_remove, 3452 .shutdown = spi_nor_shutdown, 3453 }; 3454 module_spi_mem_driver(spi_nor_driver); 3455 3456 MODULE_LICENSE("GPL v2"); 3457 MODULE_AUTHOR("Huang Shijie <shijie8@gmail.com>"); 3458 MODULE_AUTHOR("Mike Lavender"); 3459 MODULE_DESCRIPTION("framework for SPI NOR"); 3460