// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2005, Intec Automation Inc.
 * Copyright (C) 2014, Freescale Semiconductor, Inc.
 */

#include <linux/bitfield.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/mtd/spi-nor.h>

#include "core.h"

#define SFDP_PARAM_HEADER_ID(p)	(((p)->id_msb << 8) | (p)->id_lsb)
#define SFDP_PARAM_HEADER_PTP(p) \
	(((p)->parameter_table_pointer[2] << 16) | \
	 ((p)->parameter_table_pointer[1] <<  8) | \
	 ((p)->parameter_table_pointer[0] <<  0))

#define SFDP_BFPT_ID		0xff00	/* Basic Flash Parameter Table */
#define SFDP_SECTOR_MAP_ID	0xff81	/* Sector Map Table */
#define SFDP_4BAIT_ID		0xff84	/* 4-byte Address Instruction Table */
#define SFDP_PROFILE1_ID	0xff05	/* xSPI Profile 1.0 table. */
#define SFDP_SCCR_MAP_ID	0xff87	/*
					 * Status, Control and Configuration
					 * Register Map.
					 */

#define SFDP_SIGNATURE		0x50444653U

struct sfdp_header {
	u32		signature; /* 0x50444653U <=> "SFDP" */
	u8		minor;
	u8		major;
	u8		nph; /* 0-based number of parameter headers */
	u8		unused;

	/* Basic Flash Parameter Table. */
	struct sfdp_parameter_header	bfpt_header;
};

/* Fast Read settings. */
struct sfdp_bfpt_read {
	/* The Fast Read x-y-z hardware capability in params->hwcaps.mask. */
	u32			hwcaps;

	/*
	 * The <supported_bit> bit in <supported_dword> BFPT DWORD tells us
	 * whether the Fast Read x-y-z command is supported.
	 */
	u32			supported_dword;
	u32			supported_bit;

	/*
	 * The half-word at offset <settings_shift> in <settings_dword> BFPT
	 * DWORD encodes the op code, the number of mode clocks and the number
	 * of wait states to be used by the Fast Read x-y-z command.
	 */
	u32			settings_dword;
	u32			settings_shift;

	/* The SPI protocol for this Fast Read x-y-z command. */
	enum spi_nor_protocol	proto;
};
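
/*
 * Illustrative example (not taken from a specific datasheet): a settings
 * half-word of 0xeb44 decodes as op code 0xeb (bits 15:8), 2 mode clocks
 * (bits 7:5) and 4 wait states (bits 4:0), i.e. a typical 1-4-4 Fast Read
 * configuration. See spi_nor_set_read_settings_from_bfpt().
 */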

struct sfdp_bfpt_erase {
	/*
	 * The half-word at offset <shift> in DWORD <dword> encodes the
	 * op code and erase sector size to be used by Sector Erase commands.
	 */
	u32			dword;
	u32			shift;
};

#define SMPT_CMD_ADDRESS_LEN_MASK		GENMASK(23, 22)
#define SMPT_CMD_ADDRESS_LEN_0			(0x0UL << 22)
#define SMPT_CMD_ADDRESS_LEN_3			(0x1UL << 22)
#define SMPT_CMD_ADDRESS_LEN_4			(0x2UL << 22)
#define SMPT_CMD_ADDRESS_LEN_USE_CURRENT	(0x3UL << 22)

#define SMPT_CMD_READ_DUMMY_MASK		GENMASK(19, 16)
#define SMPT_CMD_READ_DUMMY_SHIFT		16
#define SMPT_CMD_READ_DUMMY(_cmd) \
	(((_cmd) & SMPT_CMD_READ_DUMMY_MASK) >> SMPT_CMD_READ_DUMMY_SHIFT)
#define SMPT_CMD_READ_DUMMY_IS_VARIABLE		0xfUL

#define SMPT_CMD_READ_DATA_MASK			GENMASK(31, 24)
#define SMPT_CMD_READ_DATA_SHIFT		24
#define SMPT_CMD_READ_DATA(_cmd) \
	(((_cmd) & SMPT_CMD_READ_DATA_MASK) >> SMPT_CMD_READ_DATA_SHIFT)

#define SMPT_CMD_OPCODE_MASK			GENMASK(15, 8)
#define SMPT_CMD_OPCODE_SHIFT			8
#define SMPT_CMD_OPCODE(_cmd) \
	(((_cmd) & SMPT_CMD_OPCODE_MASK) >> SMPT_CMD_OPCODE_SHIFT)

#define SMPT_MAP_REGION_COUNT_MASK		GENMASK(23, 16)
#define SMPT_MAP_REGION_COUNT_SHIFT		16
#define SMPT_MAP_REGION_COUNT(_header) \
	((((_header) & SMPT_MAP_REGION_COUNT_MASK) >> \
	  SMPT_MAP_REGION_COUNT_SHIFT) + 1)

#define SMPT_MAP_ID_MASK			GENMASK(15, 8)
#define SMPT_MAP_ID_SHIFT			8
#define SMPT_MAP_ID(_header) \
	(((_header) & SMPT_MAP_ID_MASK) >> SMPT_MAP_ID_SHIFT)

#define SMPT_MAP_REGION_SIZE_MASK		GENMASK(31, 8)
#define SMPT_MAP_REGION_SIZE_SHIFT		8
#define SMPT_MAP_REGION_SIZE(_region) \
	(((((_region) & SMPT_MAP_REGION_SIZE_MASK) >> \
	   SMPT_MAP_REGION_SIZE_SHIFT) + 1) * 256)

#define SMPT_MAP_REGION_ERASE_TYPE_MASK		GENMASK(3, 0)
#define SMPT_MAP_REGION_ERASE_TYPE(_region) \
	((_region) & SMPT_MAP_REGION_ERASE_TYPE_MASK)

#define SMPT_DESC_TYPE_MAP			BIT(1)
#define SMPT_DESC_END				BIT(0)

#define SFDP_4BAIT_DWORD_MAX	2

struct sfdp_4bait {
	/* The hardware capability. */
	u32		hwcaps;

	/*
	 * The <supported_bit> bit in DWORD1 of the 4BAIT tells us whether
	 * the associated 4-byte address op code is supported.
	 */
	u32		supported_bit;
};

/**
 * spi_nor_read_raw() - raw read of serial flash memory. read_opcode,
 *			addr_width and read_dummy members of the struct spi_nor
 *			should be previously set.
 * @nor:	pointer to a 'struct spi_nor'
 * @addr:	offset in the serial flash memory
 * @len:	number of bytes to read
 * @buf:	buffer where the data is copied into (dma-safe memory)
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_read_raw(struct spi_nor *nor, u32 addr, size_t len, u8 *buf)
{
	ssize_t ret;

	while (len) {
		ret = spi_nor_read_data(nor, addr, len, buf);
		if (ret < 0)
			return ret;
		if (!ret || ret > len)
			return -EIO;

		buf += ret;
		addr += ret;
		len -= ret;
	}
	return 0;
}

/**
 * spi_nor_read_sfdp() - read Serial Flash Discoverable Parameters.
 * @nor:	pointer to a 'struct spi_nor'
 * @addr:	offset in the SFDP area to start reading data from
 * @len:	number of bytes to read
 * @buf:	buffer where the SFDP data are copied into (dma-safe memory)
 *
 * Whatever the actual numbers of bytes for address and dummy cycles are
 * for (Fast) Read commands, the Read SFDP (5Ah) instruction is always
 * followed by a 3-byte address and 8 dummy clock cycles.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_read_sfdp(struct spi_nor *nor, u32 addr,
			     size_t len, void *buf)
{
	u8 addr_width, read_opcode, read_dummy;
	int ret;

	read_opcode = nor->read_opcode;
	addr_width = nor->addr_width;
	read_dummy = nor->read_dummy;

	nor->read_opcode = SPINOR_OP_RDSFDP;
	nor->addr_width = 3;
	nor->read_dummy = 8;

	ret = spi_nor_read_raw(nor, addr, len, buf);

	nor->read_opcode = read_opcode;
	nor->addr_width = addr_width;
	nor->read_dummy = read_dummy;

	return ret;
}

/**
 * spi_nor_read_sfdp_dma_unsafe() - read Serial Flash Discoverable Parameters.
 * @nor:	pointer to a 'struct spi_nor'
 * @addr:	offset in the SFDP area to start reading data from
 * @len:	number of bytes to read
 * @buf:	buffer where the SFDP data are copied into
 *
 * Wrap spi_nor_read_sfdp() using a kmalloc'ed bounce buffer as @buf is now not
 * guaranteed to be dma-safe.
 *
 * Return: -ENOMEM if kmalloc() fails, the return code of spi_nor_read_sfdp()
 *         otherwise.
 */
static int spi_nor_read_sfdp_dma_unsafe(struct spi_nor *nor, u32 addr,
					size_t len, void *buf)
{
	void *dma_safe_buf;
	int ret;

	dma_safe_buf = kmalloc(len, GFP_KERNEL);
	if (!dma_safe_buf)
		return -ENOMEM;

	ret = spi_nor_read_sfdp(nor, addr, len, dma_safe_buf);
	memcpy(buf, dma_safe_buf, len);
	kfree(dma_safe_buf);

	return ret;
}

static void
spi_nor_set_read_settings_from_bfpt(struct spi_nor_read_command *read,
				    u16 half,
				    enum spi_nor_protocol proto)
{
	read->num_mode_clocks = (half >> 5) & 0x07;
	read->num_wait_states = (half >> 0) & 0x1f;
	read->opcode = (half >> 8) & 0xff;
	read->proto = proto;
}

static const struct sfdp_bfpt_read sfdp_bfpt_reads[] = {
	/* Fast Read 1-1-2 */
	{
		SNOR_HWCAPS_READ_1_1_2,
		BFPT_DWORD(1), BIT(16),	/* Supported bit */
		BFPT_DWORD(4), 0,	/* Settings */
		SNOR_PROTO_1_1_2,
	},

	/* Fast Read 1-2-2 */
	{
		SNOR_HWCAPS_READ_1_2_2,
		BFPT_DWORD(1), BIT(20),	/* Supported bit */
		BFPT_DWORD(4), 16,	/* Settings */
		SNOR_PROTO_1_2_2,
	},

	/* Fast Read 2-2-2 */
	{
		SNOR_HWCAPS_READ_2_2_2,
		BFPT_DWORD(5), BIT(0),	/* Supported bit */
		BFPT_DWORD(6), 16,	/* Settings */
		SNOR_PROTO_2_2_2,
	},

	/* Fast Read 1-1-4 */
	{
		SNOR_HWCAPS_READ_1_1_4,
		BFPT_DWORD(1), BIT(22),	/* Supported bit */
		BFPT_DWORD(3), 16,	/* Settings */
		SNOR_PROTO_1_1_4,
	},

	/* Fast Read 1-4-4 */
	{
		SNOR_HWCAPS_READ_1_4_4,
		BFPT_DWORD(1), BIT(21),	/* Supported bit */
		BFPT_DWORD(3), 0,	/* Settings */
		SNOR_PROTO_1_4_4,
	},

	/* Fast Read 4-4-4 */
	{
		SNOR_HWCAPS_READ_4_4_4,
		BFPT_DWORD(5), BIT(4),	/* Supported bit */
		BFPT_DWORD(7), 16,	/* Settings */
		SNOR_PROTO_4_4_4,
	},
};

static const struct sfdp_bfpt_erase sfdp_bfpt_erases[] = {
	/* Erase Type 1 in DWORD8 bits[15:0] */
	{BFPT_DWORD(8), 0},

	/* Erase Type 2 in DWORD8 bits[31:16] */
	{BFPT_DWORD(8), 16},

	/* Erase Type 3 in DWORD9 bits[15:0] */
	{BFPT_DWORD(9), 0},

	/* Erase Type 4 in DWORD9 bits[31:16] */
	{BFPT_DWORD(9), 16},
};
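
/*
 * Illustrative example (not from a specific datasheet): an erase half-word of
 * 0x200c decodes as op code 0x20 (bits 15:8) and an erase size of
 * 2^0x0c = 4 KiB (bits 7:0), while a half-word of 0x0000 means the Erase Type
 * is not supported. See the Sector Erase parsing in spi_nor_parse_bfpt().
 */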

/**
 * spi_nor_set_erase_settings_from_bfpt() - set erase type settings from BFPT
 * @erase:	pointer to a structure that describes a SPI NOR erase type
 * @size:	the size of the sector/block erased by the erase type
 * @opcode:	the SPI command op code to erase the sector/block
 * @i:		erase type index as sorted in the Basic Flash Parameter Table
 *
 * The supported Erase Types will be sorted at init in ascending order, with
 * the smallest Erase Type size being the first member in the erase_type array
 * of the spi_nor_erase_map structure. Save the Erase Type index as sorted in
 * the Basic Flash Parameter Table since it will be used later on to
 * synchronize with the supported Erase Types defined in SFDP optional tables.
 */
static void
spi_nor_set_erase_settings_from_bfpt(struct spi_nor_erase_type *erase,
				     u32 size, u8 opcode, u8 i)
{
	erase->idx = i;
	spi_nor_set_erase_type(erase, size, opcode);
}

/**
 * spi_nor_map_cmp_erase_type() - compare the map's erase types by size
 * @l:	member in the left half of the map's erase_type array
 * @r:	member in the right half of the map's erase_type array
 *
 * Comparison function used in the sort() call to sort the map's erase types
 * in ascending order, the smallest erase type size being the first member in
 * the sorted erase_type array.
 *
 * Return: the result of @l->size - @r->size
 */
static int spi_nor_map_cmp_erase_type(const void *l, const void *r)
{
	const struct spi_nor_erase_type *left = l, *right = r;

	return left->size - right->size;
}

/**
 * spi_nor_sort_erase_mask() - sort erase mask
 * @map:	the erase map of the SPI NOR
 * @erase_mask:	the erase type mask to be sorted
 *
 * Replicate the sort done for the map's erase types in BFPT: sort the erase
 * mask in ascending order with the smallest erase type size starting from
 * BIT(0) in the sorted erase mask.
 *
 * Return: sorted erase mask.
 */
static u8 spi_nor_sort_erase_mask(struct spi_nor_erase_map *map, u8 erase_mask)
{
	struct spi_nor_erase_type *erase_type = map->erase_type;
	int i;
	u8 sorted_erase_mask = 0;

	if (!erase_mask)
		return 0;

	/* Replicate the sort done for the map's erase types. */
	for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
		if (erase_type[i].size && erase_mask & BIT(erase_type[i].idx))
			sorted_erase_mask |= BIT(i);

	return sorted_erase_mask;
}

/**
 * spi_nor_regions_sort_erase_types() - sort erase types in each region
 * @map:	the erase map of the SPI NOR
 *
 * Function assumes that the erase types defined in the erase map are already
 * sorted in ascending order, with the smallest erase type size being the first
 * member in the erase_type array. It replicates the sort done for the map's
 * erase types. Each region's erase bitmask will indicate which erase types are
 * supported from the sorted erase types defined in the erase map.
 * Sort all the regions' erase types at init in order to speed up the process
 * of finding the best erase command at runtime.
 */
static void spi_nor_regions_sort_erase_types(struct spi_nor_erase_map *map)
{
	struct spi_nor_erase_region *region = map->regions;
	u8 region_erase_mask, sorted_erase_mask;

	while (region) {
		region_erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;

		sorted_erase_mask = spi_nor_sort_erase_mask(map,
							    region_erase_mask);

		/* Overwrite erase mask. */
		region->offset = (region->offset & ~SNOR_ERASE_TYPE_MASK) |
				 sorted_erase_mask;

		region = spi_nor_region_next(region);
	}
}
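
/*
 * Illustrative example of the erase mask sorting, not tied to a particular
 * flash: if the BFPT defines Erase Type 1 = 64 KiB and Erase Type 2 = 4 KiB,
 * the size-sorted erase_type array places the 4 KiB entry (idx = 1) before
 * the 64 KiB entry (idx = 0), with unused zero-sized entries sorting ahead of
 * both. A BFPT-ordered mask of BIT(1), i.e. Erase Type 2, is then remapped by
 * spi_nor_sort_erase_mask() to the bit matching the 4 KiB entry's position in
 * the sorted array.
 */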

/**
 * spi_nor_parse_bfpt() - read and parse the Basic Flash Parameter Table.
 * @nor:		pointer to a 'struct spi_nor'
 * @bfpt_header:	pointer to the 'struct sfdp_parameter_header' describing
 *			the Basic Flash Parameter Table length and version
 * @params:		pointer to the 'struct spi_nor_flash_parameter' to be
 *			filled
 *
 * The Basic Flash Parameter Table is the main and only mandatory table as
 * defined by the SFDP (JESD216) specification.
 * It provides us with the total size (memory density) of the data array and
 * the number of address bytes for Fast Read, Page Program and Sector Erase
 * commands.
 * For Fast Read commands, it also gives the number of mode clock cycles and
 * wait states (regrouped in the number of dummy clock cycles) for each
 * supported instruction op code.
 * For Page Program, the page size is available since JESD216 rev A; however,
 * the supported instruction op codes are still not provided.
 * For Sector Erase commands, this table stores the supported instruction op
 * codes and the associated sector sizes.
 * Finally, the Quad Enable Requirements (QER) are also available since JESD216
 * rev A. The QER bits encode the manufacturer dependent procedure to be
 * executed to set the Quad Enable (QE) bit in some internal register of the
 * Quad SPI memory. Indeed the QE bit, when it exists, must be set before
 * sending any Quad SPI command to the memory. Actually, setting the QE bit
 * tells the memory to reassign its WP# and HOLD#/RESET# pins to functions IO2
 * and IO3 hence enabling 4 (Quad) I/O lines.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_parse_bfpt(struct spi_nor *nor,
			      const struct sfdp_parameter_header *bfpt_header,
			      struct spi_nor_flash_parameter *params)
{
	struct spi_nor_erase_map *map = &params->erase_map;
	struct spi_nor_erase_type *erase_type = map->erase_type;
	struct sfdp_bfpt bfpt;
	size_t len;
	int i, cmd, err;
	u32 addr, val;
	u16 half;
	u8 erase_mask;

	/* JESD216 Basic Flash Parameter Table length is at least 9 DWORDs. */
	if (bfpt_header->length < BFPT_DWORD_MAX_JESD216)
		return -EINVAL;

	/* Read the Basic Flash Parameter Table. */
	len = min_t(size_t, sizeof(bfpt),
		    bfpt_header->length * sizeof(u32));
	addr = SFDP_PARAM_HEADER_PTP(bfpt_header);
	memset(&bfpt, 0, sizeof(bfpt));
	err = spi_nor_read_sfdp_dma_unsafe(nor, addr, len, &bfpt);
	if (err < 0)
		return err;

	/* Fix endianness of the BFPT DWORDs. */
	le32_to_cpu_array(bfpt.dwords, BFPT_DWORD_MAX);

	/* Number of address bytes. */
	switch (bfpt.dwords[BFPT_DWORD(1)] & BFPT_DWORD1_ADDRESS_BYTES_MASK) {
	case BFPT_DWORD1_ADDRESS_BYTES_3_ONLY:
	case BFPT_DWORD1_ADDRESS_BYTES_3_OR_4:
		nor->addr_width = 3;
		break;

	case BFPT_DWORD1_ADDRESS_BYTES_4_ONLY:
		nor->addr_width = 4;
		break;

	default:
		break;
	}

	/* Flash Memory Density (in bits). */
	val = bfpt.dwords[BFPT_DWORD(2)];
	if (val & BIT(31)) {
		val &= ~BIT(31);

		/*
		 * Prevent overflows on params->size. Anyway, a NOR of 2^64
		 * bits is unlikely to exist so this error probably means
		 * the BFPT we are reading is corrupted/wrong.
		 */
		if (val > 63)
			return -EINVAL;

		params->size = 1ULL << val;
	} else {
		params->size = val + 1;
	}
	params->size >>= 3; /* Convert to bytes. */
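
	/*
	 * Illustrative examples of the density decoding above, not taken from
	 * a specific part: DWORD2 = 0x0fffffff (bit 31 cleared) means
	 * 0x0fffffff + 1 = 2^28 bits = 32 MiB, while DWORD2 = 0x80000021
	 * (bit 31 set, N = 0x21) means 2^33 bits = 1 GiB.
	 */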

	/* Fast Read settings. */
	for (i = 0; i < ARRAY_SIZE(sfdp_bfpt_reads); i++) {
		const struct sfdp_bfpt_read *rd = &sfdp_bfpt_reads[i];
		struct spi_nor_read_command *read;

		if (!(bfpt.dwords[rd->supported_dword] & rd->supported_bit)) {
			params->hwcaps.mask &= ~rd->hwcaps;
			continue;
		}

		params->hwcaps.mask |= rd->hwcaps;
		cmd = spi_nor_hwcaps_read2cmd(rd->hwcaps);
		read = &params->reads[cmd];
		half = bfpt.dwords[rd->settings_dword] >> rd->settings_shift;
		spi_nor_set_read_settings_from_bfpt(read, half, rd->proto);
	}

	/*
	 * Sector Erase settings. Reinitialize the uniform erase map using the
	 * Erase Types defined in the bfpt table.
	 */
	erase_mask = 0;
	memset(&params->erase_map, 0, sizeof(params->erase_map));
	for (i = 0; i < ARRAY_SIZE(sfdp_bfpt_erases); i++) {
		const struct sfdp_bfpt_erase *er = &sfdp_bfpt_erases[i];
		u32 erasesize;
		u8 opcode;

		half = bfpt.dwords[er->dword] >> er->shift;
		erasesize = half & 0xff;

		/* erasesize == 0 means this Erase Type is not supported. */
		if (!erasesize)
			continue;

		erasesize = 1U << erasesize;
		opcode = (half >> 8) & 0xff;
		erase_mask |= BIT(i);
		spi_nor_set_erase_settings_from_bfpt(&erase_type[i], erasesize,
						     opcode, i);
	}
	spi_nor_init_uniform_erase_map(map, erase_mask, params->size);
	/*
	 * Sort all the map's Erase Types in ascending order with the smallest
	 * erase size being the first member in the erase_type array.
	 */
	sort(erase_type, SNOR_ERASE_TYPE_MAX, sizeof(erase_type[0]),
	     spi_nor_map_cmp_erase_type, NULL);
	/*
	 * Sort the erase types in the uniform region in order to update the
	 * uniform_erase_type bitmask. The bitmask will be used later on when
	 * selecting the uniform erase.
	 */
	spi_nor_regions_sort_erase_types(map);
	map->uniform_erase_type = map->uniform_region.offset &
				  SNOR_ERASE_TYPE_MASK;

	/* Stop here if not JESD216 rev A or later. */
	if (bfpt_header->length == BFPT_DWORD_MAX_JESD216)
		return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt,
						params);

	/* Page size: this field specifies 'N' so the page size = 2^N bytes. */
	val = bfpt.dwords[BFPT_DWORD(11)];
	val &= BFPT_DWORD11_PAGE_SIZE_MASK;
	val >>= BFPT_DWORD11_PAGE_SIZE_SHIFT;
	params->page_size = 1U << val;

	/* Quad Enable Requirements. */
	switch (bfpt.dwords[BFPT_DWORD(15)] & BFPT_DWORD15_QER_MASK) {
	case BFPT_DWORD15_QER_NONE:
		params->quad_enable = NULL;
		break;

	case BFPT_DWORD15_QER_SR2_BIT1_BUGGY:
		/*
		 * Writing only one byte to the Status Register has the
		 * side-effect of clearing Status Register 2.
		 */
	case BFPT_DWORD15_QER_SR2_BIT1_NO_RD:
		/*
		 * Read Configuration Register (35h) instruction is not
		 * supported.
		 */
		nor->flags |= SNOR_F_HAS_16BIT_SR | SNOR_F_NO_READ_CR;
		params->quad_enable = spi_nor_sr2_bit1_quad_enable;
		break;

	case BFPT_DWORD15_QER_SR1_BIT6:
		nor->flags &= ~SNOR_F_HAS_16BIT_SR;
		params->quad_enable = spi_nor_sr1_bit6_quad_enable;
		break;

	case BFPT_DWORD15_QER_SR2_BIT7:
		nor->flags &= ~SNOR_F_HAS_16BIT_SR;
		params->quad_enable = spi_nor_sr2_bit7_quad_enable;
		break;

	case BFPT_DWORD15_QER_SR2_BIT1:
		/*
		 * JESD216 rev B or later does not specify whether writing only
		 * one byte to the Status Register also clears Status Register
		 * 2, so let's be cautious and keep the default assumption of a
		 * 16-bit Write Status (01h) command.
		 */
		nor->flags |= SNOR_F_HAS_16BIT_SR;

		params->quad_enable = spi_nor_sr2_bit1_quad_enable;
		break;

	default:
		dev_dbg(nor->dev, "BFPT QER reserved value used\n");
		break;
	}

	/* Soft Reset support. */
	if (bfpt.dwords[BFPT_DWORD(16)] & BFPT_DWORD16_SWRST_EN_RST)
		nor->flags |= SNOR_F_SOFT_RESET;

	/* Stop here if not JESD216 rev C or later. */
	if (bfpt_header->length == BFPT_DWORD_MAX_JESD216B)
		return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt,
						params);

	/* 8D-8D-8D command extension. */
	switch (bfpt.dwords[BFPT_DWORD(18)] & BFPT_DWORD18_CMD_EXT_MASK) {
	case BFPT_DWORD18_CMD_EXT_REP:
		nor->cmd_ext_type = SPI_NOR_EXT_REPEAT;
		break;

	case BFPT_DWORD18_CMD_EXT_INV:
		nor->cmd_ext_type = SPI_NOR_EXT_INVERT;
		break;

	case BFPT_DWORD18_CMD_EXT_RES:
		dev_dbg(nor->dev, "Reserved command extension used\n");
		break;

	case BFPT_DWORD18_CMD_EXT_16B:
		dev_dbg(nor->dev, "16-bit opcodes not supported\n");
		return -EOPNOTSUPP;
	}

	return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt, params);
}

/**
 * spi_nor_smpt_addr_width() - return the address width used in the
 *			       configuration detection command.
 * @nor:	pointer to a 'struct spi_nor'
 * @settings:	configuration detection command descriptor, dword1
 */
static u8 spi_nor_smpt_addr_width(const struct spi_nor *nor, const u32 settings)
{
	switch (settings & SMPT_CMD_ADDRESS_LEN_MASK) {
	case SMPT_CMD_ADDRESS_LEN_0:
		return 0;
	case SMPT_CMD_ADDRESS_LEN_3:
		return 3;
	case SMPT_CMD_ADDRESS_LEN_4:
		return 4;
	case SMPT_CMD_ADDRESS_LEN_USE_CURRENT:
	default:
		return nor->addr_width;
	}
}

/**
 * spi_nor_smpt_read_dummy() - return the configuration detection command read
 *			       latency, in clock cycles.
 * @nor:	pointer to a 'struct spi_nor'
 * @settings:	configuration detection command descriptor, dword1
 *
 * Return: the number of dummy cycles for an SMPT read
 */
static u8 spi_nor_smpt_read_dummy(const struct spi_nor *nor, const u32 settings)
{
	u8 read_dummy = SMPT_CMD_READ_DUMMY(settings);

	if (read_dummy == SMPT_CMD_READ_DUMMY_IS_VARIABLE)
		return nor->read_dummy;
	return read_dummy;
}

/**
 * spi_nor_get_map_in_use() - get the configuration map in use
 * @nor:	pointer to a 'struct spi_nor'
 * @smpt:	pointer to the sector map parameter table
 * @smpt_len:	sector map parameter table length
 *
 * Return: pointer to the map in use, ERR_PTR(-errno) otherwise.
 */
static const u32 *spi_nor_get_map_in_use(struct spi_nor *nor, const u32 *smpt,
					 u8 smpt_len)
{
	const u32 *ret;
	u8 *buf;
	u32 addr;
	int err;
	u8 i;
	u8 addr_width, read_opcode, read_dummy;
	u8 read_data_mask, map_id;
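
	/*
	 * Note on the detection loop below, meant as a reading aid rather than
	 * normative text: each optional Detection Command Descriptor reads one
	 * byte from the flash and contributes a single bit (the read byte
	 * ANDed with the descriptor's data mask) to map_id, most significant
	 * bit first. With, say, two detection commands, map_id ends up in the
	 * 0..3 range and selects one of up to four configuration maps.
	 */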

	/* Use a kmalloc'ed bounce buffer to guarantee it is DMA-able. */
	buf = kmalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	addr_width = nor->addr_width;
	read_dummy = nor->read_dummy;
	read_opcode = nor->read_opcode;

	map_id = 0;
	/* Determine if there are any optional Detection Command Descriptors */
	for (i = 0; i < smpt_len; i += 2) {
		if (smpt[i] & SMPT_DESC_TYPE_MAP)
			break;

		read_data_mask = SMPT_CMD_READ_DATA(smpt[i]);
		nor->addr_width = spi_nor_smpt_addr_width(nor, smpt[i]);
		nor->read_dummy = spi_nor_smpt_read_dummy(nor, smpt[i]);
		nor->read_opcode = SMPT_CMD_OPCODE(smpt[i]);
		addr = smpt[i + 1];

		err = spi_nor_read_raw(nor, addr, 1, buf);
		if (err) {
			ret = ERR_PTR(err);
			goto out;
		}

		/*
		 * Build an index value that is used to select the Sector Map
		 * Configuration that is currently in use.
		 */
		map_id = map_id << 1 | !!(*buf & read_data_mask);
	}

	/*
	 * If command descriptors are provided, they always precede map
	 * descriptors in the table. There is no need to start the iteration
	 * over smpt array all over again.
	 *
	 * Find the matching configuration map.
	 */
	ret = ERR_PTR(-EINVAL);
	while (i < smpt_len) {
		if (SMPT_MAP_ID(smpt[i]) == map_id) {
			ret = smpt + i;
			break;
		}

		/*
		 * If there are no more configuration map descriptors and no
		 * configuration ID matched the configuration identifier, the
		 * sector address map is unknown.
		 */
		if (smpt[i] & SMPT_DESC_END)
			break;

		/* increment the table index to the next map */
		i += SMPT_MAP_REGION_COUNT(smpt[i]) + 1;
	}

	/* fall through */
out:
	kfree(buf);
	nor->addr_width = addr_width;
	nor->read_dummy = read_dummy;
	nor->read_opcode = read_opcode;
	return ret;
}

static void spi_nor_region_mark_end(struct spi_nor_erase_region *region)
{
	region->offset |= SNOR_LAST_REGION;
}

static void spi_nor_region_mark_overlay(struct spi_nor_erase_region *region)
{
	region->offset |= SNOR_OVERLAID_REGION;
}

/**
 * spi_nor_region_check_overlay() - set overlay bit when the region is overlaid
 * @region:	pointer to a structure that describes a SPI NOR erase region
 * @erase:	pointer to a structure that describes a SPI NOR erase type
 * @erase_type:	erase type bitmask
 */
static void
spi_nor_region_check_overlay(struct spi_nor_erase_region *region,
			     const struct spi_nor_erase_type *erase,
			     const u8 erase_type)
{
	int i;

	for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
		if (!(erase[i].size && erase_type & BIT(erase[i].idx)))
			continue;
		if (region->size & erase[i].size_mask) {
			spi_nor_region_mark_overlay(region);
			return;
		}
	}
}
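
/*
 * Illustrative example of a Sector Map region DWORD, not taken from a real
 * table: a value of 0x0000ff05 encodes a region of (0xff + 1) * 256 bytes =
 * 64 KiB (bits 31:8) supporting Erase Types 1 and 3 (bits 3:0 = 0x5), as
 * decoded by SMPT_MAP_REGION_SIZE() and SMPT_MAP_REGION_ERASE_TYPE().
 */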

/**
 * spi_nor_init_non_uniform_erase_map() - initialize the non-uniform erase map
 * @nor:	pointer to a 'struct spi_nor'
 * @params:	pointer to a duplicate 'struct spi_nor_flash_parameter' that is
 *		used for storing SFDP parsed data
 * @smpt:	pointer to the sector map parameter table
 *
 * Return: 0 on success, -errno otherwise.
 */
static int
spi_nor_init_non_uniform_erase_map(struct spi_nor *nor,
				   struct spi_nor_flash_parameter *params,
				   const u32 *smpt)
{
	struct spi_nor_erase_map *map = &params->erase_map;
	struct spi_nor_erase_type *erase = map->erase_type;
	struct spi_nor_erase_region *region;
	u64 offset;
	u32 region_count;
	int i, j;
	u8 uniform_erase_type, save_uniform_erase_type;
	u8 erase_type, regions_erase_type;

	region_count = SMPT_MAP_REGION_COUNT(*smpt);
	/*
	 * The regions will be freed when the driver detaches from the
	 * device.
	 */
	region = devm_kcalloc(nor->dev, region_count, sizeof(*region),
			      GFP_KERNEL);
	if (!region)
		return -ENOMEM;
	map->regions = region;

	uniform_erase_type = 0xff;
	regions_erase_type = 0;
	offset = 0;
	/* Populate regions. */
	for (i = 0; i < region_count; i++) {
		j = i + 1; /* index for the region dword */
		region[i].size = SMPT_MAP_REGION_SIZE(smpt[j]);
		erase_type = SMPT_MAP_REGION_ERASE_TYPE(smpt[j]);
		region[i].offset = offset | erase_type;

		spi_nor_region_check_overlay(&region[i], erase, erase_type);

		/*
		 * Save the erase types that are supported in all regions and
		 * can erase the entire flash memory.
		 */
		uniform_erase_type &= erase_type;

		/*
		 * regions_erase_type mask will indicate all the erase types
		 * supported in this configuration map.
		 */
		regions_erase_type |= erase_type;

		offset = (region[i].offset & ~SNOR_ERASE_FLAGS_MASK) +
			 region[i].size;
	}
	spi_nor_region_mark_end(&region[i - 1]);

	save_uniform_erase_type = map->uniform_erase_type;
	map->uniform_erase_type = spi_nor_sort_erase_mask(map,
							  uniform_erase_type);

	if (!regions_erase_type) {
		/*
		 * Roll back to the previous uniform_erase_type mask, SMPT is
		 * broken.
		 */
		map->uniform_erase_type = save_uniform_erase_type;
		return -EINVAL;
	}

	/*
	 * BFPT advertises all the erase types supported by all the possible
	 * map configurations. Mask out the erase types that are not supported
	 * by the current map configuration.
	 */
	for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
		if (!(regions_erase_type & BIT(erase[i].idx)))
			spi_nor_set_erase_type(&erase[i], 0, 0xFF);

	return 0;
}

/**
 * spi_nor_parse_smpt() - parse Sector Map Parameter Table
 * @nor:	pointer to a 'struct spi_nor'
 * @smpt_header: sector map parameter table header
 * @params:	pointer to a duplicate 'struct spi_nor_flash_parameter'
 *		that is used for storing SFDP parsed data
 *
 * This table is optional, but when available, we parse it to identify the
 * location and size of sectors within the main data array of the flash memory
 * device and to identify which Erase Types are supported by each sector.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_parse_smpt(struct spi_nor *nor,
			      const struct sfdp_parameter_header *smpt_header,
			      struct spi_nor_flash_parameter *params)
{
	const u32 *sector_map;
	u32 *smpt;
	size_t len;
	u32 addr;
	int ret;

	/* Read the Sector Map Parameter Table. */
	len = smpt_header->length * sizeof(*smpt);
	smpt = kmalloc(len, GFP_KERNEL);
	if (!smpt)
		return -ENOMEM;

	addr = SFDP_PARAM_HEADER_PTP(smpt_header);
	ret = spi_nor_read_sfdp(nor, addr, len, smpt);
	if (ret)
		goto out;

	/* Fix endianness of the SMPT DWORDs. */
	le32_to_cpu_array(smpt, smpt_header->length);

	sector_map = spi_nor_get_map_in_use(nor, smpt, smpt_header->length);
	if (IS_ERR(sector_map)) {
		ret = PTR_ERR(sector_map);
		goto out;
	}

	ret = spi_nor_init_non_uniform_erase_map(nor, params, sector_map);
	if (ret)
		goto out;

	spi_nor_regions_sort_erase_types(&params->erase_map);
	/* fall through */
out:
	kfree(smpt);
	return ret;
}

/**
 * spi_nor_parse_4bait() - parse the 4-Byte Address Instruction Table
 * @nor:		pointer to a 'struct spi_nor'.
 * @param_header:	pointer to the 'struct sfdp_parameter_header' describing
 *			the 4-Byte Address Instruction Table length and version.
 * @params:		pointer to the 'struct spi_nor_flash_parameter' to be
 *			filled.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_parse_4bait(struct spi_nor *nor,
			       const struct sfdp_parameter_header *param_header,
			       struct spi_nor_flash_parameter *params)
{
	static const struct sfdp_4bait reads[] = {
		{ SNOR_HWCAPS_READ,		BIT(0) },
		{ SNOR_HWCAPS_READ_FAST,	BIT(1) },
		{ SNOR_HWCAPS_READ_1_1_2,	BIT(2) },
		{ SNOR_HWCAPS_READ_1_2_2,	BIT(3) },
		{ SNOR_HWCAPS_READ_1_1_4,	BIT(4) },
		{ SNOR_HWCAPS_READ_1_4_4,	BIT(5) },
		{ SNOR_HWCAPS_READ_1_1_1_DTR,	BIT(13) },
		{ SNOR_HWCAPS_READ_1_2_2_DTR,	BIT(14) },
		{ SNOR_HWCAPS_READ_1_4_4_DTR,	BIT(15) },
	};
	static const struct sfdp_4bait programs[] = {
		{ SNOR_HWCAPS_PP,		BIT(6) },
		{ SNOR_HWCAPS_PP_1_1_4,		BIT(7) },
		{ SNOR_HWCAPS_PP_1_4_4,		BIT(8) },
	};
	static const struct sfdp_4bait erases[SNOR_ERASE_TYPE_MAX] = {
		{ 0u /* not used */,		BIT(9) },
		{ 0u /* not used */,		BIT(10) },
		{ 0u /* not used */,		BIT(11) },
		{ 0u /* not used */,		BIT(12) },
	};
	struct spi_nor_pp_command *params_pp = params->page_programs;
	struct spi_nor_erase_map *map = &params->erase_map;
	struct spi_nor_erase_type *erase_type = map->erase_type;
	u32 *dwords;
	size_t len;
	u32 addr, discard_hwcaps, read_hwcaps, pp_hwcaps, erase_mask;
	int i, ret;

	if (param_header->major != SFDP_JESD216_MAJOR ||
	    param_header->length < SFDP_4BAIT_DWORD_MAX)
		return -EINVAL;

	/* Read the 4-byte Address Instruction Table. */
	len = sizeof(*dwords) * SFDP_4BAIT_DWORD_MAX;

	/* Use a kmalloc'ed bounce buffer to guarantee it is DMA-able. */
	dwords = kmalloc(len, GFP_KERNEL);
	if (!dwords)
		return -ENOMEM;

	addr = SFDP_PARAM_HEADER_PTP(param_header);
	ret = spi_nor_read_sfdp(nor, addr, len, dwords);
	if (ret)
		goto out;

	/* Fix endianness of the 4BAIT DWORDs. */
	le32_to_cpu_array(dwords, SFDP_4BAIT_DWORD_MAX);

	/*
	 * Compute the subset of (Fast) Read commands for which the 4-byte
	 * version is supported.
	 */
	discard_hwcaps = 0;
	read_hwcaps = 0;
	for (i = 0; i < ARRAY_SIZE(reads); i++) {
		const struct sfdp_4bait *read = &reads[i];

		discard_hwcaps |= read->hwcaps;
		if ((params->hwcaps.mask & read->hwcaps) &&
		    (dwords[0] & read->supported_bit))
			read_hwcaps |= read->hwcaps;
	}

	/*
	 * Compute the subset of Page Program commands for which the 4-byte
	 * version is supported.
	 */
	pp_hwcaps = 0;
	for (i = 0; i < ARRAY_SIZE(programs); i++) {
		const struct sfdp_4bait *program = &programs[i];

		/*
		 * The 4 Byte Address Instruction (Optional) Table is the only
		 * SFDP table that indicates support for Page Program Commands.
		 * Bypass the params->hwcaps.mask and consider 4BAIT the biggest
		 * authority for specifying Page Program support.
		 */
		discard_hwcaps |= program->hwcaps;
		if (dwords[0] & program->supported_bit)
			pp_hwcaps |= program->hwcaps;
	}

	/*
	 * Compute the subset of Sector Erase commands for which the 4-byte
	 * version is supported.
	 */
	erase_mask = 0;
	for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
		const struct sfdp_4bait *erase = &erases[i];

		if (dwords[0] & erase->supported_bit)
			erase_mask |= BIT(i);
	}

	/* Replicate the sort done for the map's erase types in BFPT. */
	erase_mask = spi_nor_sort_erase_mask(map, erase_mask);

	/*
	 * We need at least one 4-byte op code per read, program and erase
	 * operation; the .read(), .write() and .erase() hooks share the
	 * nor->addr_width value.
	 */
	if (!read_hwcaps || !pp_hwcaps || !erase_mask)
		goto out;

	/*
	 * Discard all operations from the 4-byte instruction set which are
	 * not supported by this memory.
	 */
	params->hwcaps.mask &= ~discard_hwcaps;
	params->hwcaps.mask |= (read_hwcaps | pp_hwcaps);

	/* Use the 4-byte address instruction set. */
	for (i = 0; i < SNOR_CMD_READ_MAX; i++) {
		struct spi_nor_read_command *read_cmd = &params->reads[i];

		read_cmd->opcode = spi_nor_convert_3to4_read(read_cmd->opcode);
	}

	/* 4BAIT is the only SFDP table that indicates page program support. */
	if (pp_hwcaps & SNOR_HWCAPS_PP) {
		spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP],
					SPINOR_OP_PP_4B, SNOR_PROTO_1_1_1);
		/*
		 * Since xSPI Page Program opcode is backward compatible with
		 * Legacy SPI, use Legacy SPI opcode there as well.
		 */
		spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP_8_8_8_DTR],
					SPINOR_OP_PP_4B, SNOR_PROTO_8_8_8_DTR);
	}
	if (pp_hwcaps & SNOR_HWCAPS_PP_1_1_4)
		spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP_1_1_4],
					SPINOR_OP_PP_1_1_4_4B,
					SNOR_PROTO_1_1_4);
	if (pp_hwcaps & SNOR_HWCAPS_PP_1_4_4)
		spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP_1_4_4],
					SPINOR_OP_PP_1_4_4_4B,
					SNOR_PROTO_1_4_4);

	for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
		if (erase_mask & BIT(i))
			erase_type[i].opcode = (dwords[1] >>
						erase_type[i].idx * 8) & 0xFF;
		else
			spi_nor_set_erase_type(&erase_type[i], 0u, 0xFF);
	}
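
	/*
	 * Reading aid, with a made-up value rather than one from a datasheet:
	 * DWORD2 of the 4BAIT packs the 4-byte erase op codes one byte per
	 * BFPT Erase Type, so dwords[1] == 0x00dc0021 would give Erase Type 1
	 * op code 0x21 (bits 7:0) and Erase Type 3 op code 0xdc (bits 23:16),
	 * which is what the loop above extracts via erase_type[i].idx * 8.
	 */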

	/*
	 * We set SNOR_F_HAS_4BAIT in order to skip spi_nor_set_4byte_opcodes()
	 * later because we already did the conversion to 4-byte opcodes. Also,
	 * the latter function implements a legacy quirk for the erase size of
	 * Spansion memory. However this quirk is no longer needed with new
	 * SFDP compliant memories.
	 */
	nor->addr_width = 4;
	nor->flags |= SNOR_F_4B_OPCODES | SNOR_F_HAS_4BAIT;

	/* fall through */
out:
	kfree(dwords);
	return ret;
}

#define PROFILE1_DWORD1_RDSR_ADDR_BYTES		BIT(29)
#define PROFILE1_DWORD1_RDSR_DUMMY		BIT(28)
#define PROFILE1_DWORD1_RD_FAST_CMD		GENMASK(15, 8)
#define PROFILE1_DWORD4_DUMMY_200MHZ		GENMASK(11, 7)
#define PROFILE1_DWORD5_DUMMY_166MHZ		GENMASK(31, 27)
#define PROFILE1_DWORD5_DUMMY_133MHZ		GENMASK(21, 17)
#define PROFILE1_DWORD5_DUMMY_100MHZ		GENMASK(11, 7)

/**
 * spi_nor_parse_profile1() - parse the xSPI Profile 1.0 table
 * @nor:		pointer to a 'struct spi_nor'
 * @profile1_header:	pointer to the 'struct sfdp_parameter_header' describing
 *			the Profile 1.0 Table length and version.
 * @params:		pointer to the 'struct spi_nor_flash_parameter' to be
 *			filled.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_parse_profile1(struct spi_nor *nor,
				  const struct sfdp_parameter_header *profile1_header,
				  struct spi_nor_flash_parameter *params)
{
	u32 *dwords, addr;
	size_t len;
	int ret;
	u8 dummy, opcode;

	len = profile1_header->length * sizeof(*dwords);
	dwords = kmalloc(len, GFP_KERNEL);
	if (!dwords)
		return -ENOMEM;

	addr = SFDP_PARAM_HEADER_PTP(profile1_header);
	ret = spi_nor_read_sfdp(nor, addr, len, dwords);
	if (ret)
		goto out;

	le32_to_cpu_array(dwords, profile1_header->length);

	/* Get 8D-8D-8D fast read opcode and dummy cycles. */
	opcode = FIELD_GET(PROFILE1_DWORD1_RD_FAST_CMD, dwords[0]);

	/* Set the Read Status Register dummy cycles and dummy address bytes. */
	if (dwords[0] & PROFILE1_DWORD1_RDSR_DUMMY)
		params->rdsr_dummy = 8;
	else
		params->rdsr_dummy = 4;

	if (dwords[0] & PROFILE1_DWORD1_RDSR_ADDR_BYTES)
		params->rdsr_addr_nbytes = 4;
	else
		params->rdsr_addr_nbytes = 0;

	/*
	 * We don't know what speed the controller is running at. Find the
	 * dummy cycles for the fastest frequency the flash can run at to be
	 * sure we are never short of dummy cycles. A value of 0 means the
	 * frequency is not supported.
	 *
	 * Default to PROFILE1_DUMMY_DEFAULT if we don't find anything, and let
	 * flashes set the correct value if needed in their fixup hooks.
	 */
	dummy = FIELD_GET(PROFILE1_DWORD4_DUMMY_200MHZ, dwords[3]);
	if (!dummy)
		dummy = FIELD_GET(PROFILE1_DWORD5_DUMMY_166MHZ, dwords[4]);
	if (!dummy)
		dummy = FIELD_GET(PROFILE1_DWORD5_DUMMY_133MHZ, dwords[4]);
	if (!dummy)
		dummy = FIELD_GET(PROFILE1_DWORD5_DUMMY_100MHZ, dwords[4]);
	if (!dummy)
		dev_dbg(nor->dev,
			"Can't find dummy cycles from Profile 1.0 table\n");

	/* Round up to an even value to avoid tripping controllers up. */
	dummy = round_up(dummy, 2);

	/* Update the fast read settings. */
	spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_8_8_8_DTR],
				  0, dummy, opcode,
				  SNOR_PROTO_8_8_8_DTR);

out:
	kfree(dwords);
	return ret;
}

#define SCCR_DWORD22_OCTAL_DTR_EN_VOLATILE	BIT(31)

/**
 * spi_nor_parse_sccr() - Parse the Status, Control and Configuration Register
 *			  Map.
 * @nor:		pointer to a 'struct spi_nor'
 * @sccr_header:	pointer to the 'struct sfdp_parameter_header' describing
 *			the SCCR Map table length and version.
 * @params:		pointer to the 'struct spi_nor_flash_parameter' to be
 *			filled.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_parse_sccr(struct spi_nor *nor,
			      const struct sfdp_parameter_header *sccr_header,
			      struct spi_nor_flash_parameter *params)
{
	u32 *dwords, addr;
	size_t len;
	int ret;

	len = sccr_header->length * sizeof(*dwords);
	dwords = kmalloc(len, GFP_KERNEL);
	if (!dwords)
		return -ENOMEM;

	addr = SFDP_PARAM_HEADER_PTP(sccr_header);
	ret = spi_nor_read_sfdp(nor, addr, len, dwords);
	if (ret)
		goto out;

	le32_to_cpu_array(dwords, sccr_header->length);

	if (FIELD_GET(SCCR_DWORD22_OCTAL_DTR_EN_VOLATILE, dwords[22]))
		nor->flags |= SNOR_F_IO_MODE_EN_VOLATILE;

out:
	kfree(dwords);
	return ret;
}

/**
 * spi_nor_parse_sfdp() - parse the Serial Flash Discoverable Parameters.
 * @nor:	pointer to a 'struct spi_nor'
 * @params:	pointer to the 'struct spi_nor_flash_parameter' to be
 *		filled
 *
 * The Serial Flash Discoverable Parameters are described by the JEDEC JESD216
 * specification. This is a standard which tends to be supported by almost all
 * (Q)SPI memory manufacturers. Those hard-coded tables allow us to learn at
 * runtime the main parameters needed to perform basic SPI flash operations
 * such as Fast Read, Page Program or Sector Erase commands.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_parse_sfdp(struct spi_nor *nor,
		       struct spi_nor_flash_parameter *params)
{
	const struct sfdp_parameter_header *param_header, *bfpt_header;
	struct sfdp_parameter_header *param_headers = NULL;
	struct sfdp_header header;
	struct device *dev = nor->dev;
	size_t psize;
	int i, err;

	/* Get the SFDP header. */
	err = spi_nor_read_sfdp_dma_unsafe(nor, 0, sizeof(header), &header);
	if (err < 0)
		return err;

	/* Check the SFDP header version. */
	if (le32_to_cpu(header.signature) != SFDP_SIGNATURE ||
	    header.major != SFDP_JESD216_MAJOR)
		return -EINVAL;

	/*
	 * Verify that the first and only mandatory parameter header is a
	 * Basic Flash Parameter Table header as specified in JESD216.
	 */
	bfpt_header = &header.bfpt_header;
	if (SFDP_PARAM_HEADER_ID(bfpt_header) != SFDP_BFPT_ID ||
	    bfpt_header->major != SFDP_JESD216_MAJOR)
		return -EINVAL;

	/*
	 * Allocate memory then read all parameter headers with a single
	 * Read SFDP command. These parameter headers will actually be parsed
	 * twice: a first time to get the latest revision of the basic flash
	 * parameter table, then a second time to handle the supported optional
	 * tables.
	 * Hence we read the parameter headers once for all to reduce the
	 * processing time. Also we use kmalloc() instead of devm_kmalloc()
	 * because we don't need to keep these parameter headers: the allocated
	 * memory is always released with kfree() before exiting this function.
	 */
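
	/*
	 * Reading aid: NPH is 0-based, i.e. a value of N means N + 1 parameter
	 * headers in total. The first, mandatory BFPT header has already been
	 * read as part of 'header' above, so header.nph is exactly the number
	 * of additional parameter headers to read below.
	 */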
	if (header.nph) {
		psize = header.nph * sizeof(*param_headers);

		param_headers = kmalloc(psize, GFP_KERNEL);
		if (!param_headers)
			return -ENOMEM;

		err = spi_nor_read_sfdp(nor, sizeof(header),
					psize, param_headers);
		if (err < 0) {
			dev_dbg(dev, "failed to read SFDP parameter headers\n");
			goto exit;
		}
	}

	/*
	 * Check other parameter headers to get the latest revision of
	 * the basic flash parameter table.
	 */
	for (i = 0; i < header.nph; i++) {
		param_header = &param_headers[i];

		if (SFDP_PARAM_HEADER_ID(param_header) == SFDP_BFPT_ID &&
		    param_header->major == SFDP_JESD216_MAJOR &&
		    (param_header->minor > bfpt_header->minor ||
		     (param_header->minor == bfpt_header->minor &&
		      param_header->length > bfpt_header->length)))
			bfpt_header = param_header;
	}

	err = spi_nor_parse_bfpt(nor, bfpt_header, params);
	if (err)
		goto exit;

	/* Parse optional parameter tables. */
	for (i = 0; i < header.nph; i++) {
		param_header = &param_headers[i];

		switch (SFDP_PARAM_HEADER_ID(param_header)) {
		case SFDP_SECTOR_MAP_ID:
			err = spi_nor_parse_smpt(nor, param_header, params);
			break;

		case SFDP_4BAIT_ID:
			err = spi_nor_parse_4bait(nor, param_header, params);
			break;

		case SFDP_PROFILE1_ID:
			err = spi_nor_parse_profile1(nor, param_header, params);
			break;

		case SFDP_SCCR_MAP_ID:
			err = spi_nor_parse_sccr(nor, param_header, params);
			break;

		default:
			break;
		}

		if (err) {
			dev_warn(dev, "Failed to parse optional parameter table: %04x\n",
				 SFDP_PARAM_HEADER_ID(param_header));
			/*
			 * Let's not drop all information we extracted so far
			 * if optional table parsers fail. In case of failure,
			 * each optional parser is responsible for rolling back
			 * to the previously known spi_nor data.
			 */
			err = 0;
		}
	}

exit:
	kfree(param_headers);
	return err;
}