// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2005, Intec Automation Inc.
 * Copyright (C) 2014, Freescale Semiconductor, Inc.
 */

#include <linux/bitfield.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/mtd/spi-nor.h>

#include "core.h"

#define SFDP_PARAM_HEADER_ID(p)	(((p)->id_msb << 8) | (p)->id_lsb)
#define SFDP_PARAM_HEADER_PTP(p) \
	(((p)->parameter_table_pointer[2] << 16) | \
	 ((p)->parameter_table_pointer[1] <<  8) | \
	 ((p)->parameter_table_pointer[0] <<  0))

#define SFDP_BFPT_ID		0xff00	/* Basic Flash Parameter Table */
#define SFDP_SECTOR_MAP_ID	0xff81	/* Sector Map Table */
#define SFDP_4BAIT_ID		0xff84	/* 4-byte Address Instruction Table */
#define SFDP_PROFILE1_ID	0xff05	/* xSPI Profile 1.0 table. */
#define SFDP_SCCR_MAP_ID	0xff87	/*
					 * Status, Control and Configuration
					 * Register Map.
					 */

#define SFDP_SIGNATURE		0x50444653U

struct sfdp_header {
	u32		signature; /* 0x50444653U <=> "SFDP" */
	u8		minor;
	u8		major;
	u8		nph; /* 0-based number of parameter headers */
	u8		unused;

	/* Basic Flash Parameter Table. */
	struct sfdp_parameter_header	bfpt_header;
};

/* Fast Read settings. */
struct sfdp_bfpt_read {
	/* The Fast Read x-y-z hardware capability in params->hwcaps.mask. */
	u32			hwcaps;

	/*
	 * The <supported_bit> bit in <supported_dword> BFPT DWORD tells us
	 * whether the Fast Read x-y-z command is supported.
	 */
	u32			supported_dword;
	u32			supported_bit;

	/*
	 * The half-word at offset <settings_shift> in <settings_dword> BFPT
	 * DWORD encodes the op code, the number of mode clocks and the number
	 * of wait states to be used by the Fast Read x-y-z command.
	 */
	u32			settings_dword;
	u32			settings_shift;

	/* The SPI protocol for this Fast Read x-y-z command. */
	enum spi_nor_protocol	proto;
};

struct sfdp_bfpt_erase {
	/*
	 * The half-word at offset <shift> in DWORD <dword> encodes the
	 * op code and erase sector size to be used by Sector Erase commands.
	 */
	u32			dword;
	u32			shift;
};
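
/*
 * Illustrative decode of such a Sector Erase half-word (hypothetical value,
 * not taken from a real BFPT): half = 0x200c encodes opcode 0x20 in bits
 * [15:8] and N = 0x0c in bits [7:0], i.e. a 2^12 = 4 KiB erase sector size.
 */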

#define SMPT_CMD_ADDRESS_LEN_MASK		GENMASK(23, 22)
#define SMPT_CMD_ADDRESS_LEN_0			(0x0UL << 22)
#define SMPT_CMD_ADDRESS_LEN_3			(0x1UL << 22)
#define SMPT_CMD_ADDRESS_LEN_4			(0x2UL << 22)
#define SMPT_CMD_ADDRESS_LEN_USE_CURRENT	(0x3UL << 22)

#define SMPT_CMD_READ_DUMMY_MASK		GENMASK(19, 16)
#define SMPT_CMD_READ_DUMMY_SHIFT		16
#define SMPT_CMD_READ_DUMMY(_cmd) \
	(((_cmd) & SMPT_CMD_READ_DUMMY_MASK) >> SMPT_CMD_READ_DUMMY_SHIFT)
#define SMPT_CMD_READ_DUMMY_IS_VARIABLE		0xfUL

#define SMPT_CMD_READ_DATA_MASK			GENMASK(31, 24)
#define SMPT_CMD_READ_DATA_SHIFT		24
#define SMPT_CMD_READ_DATA(_cmd) \
	(((_cmd) & SMPT_CMD_READ_DATA_MASK) >> SMPT_CMD_READ_DATA_SHIFT)

#define SMPT_CMD_OPCODE_MASK			GENMASK(15, 8)
#define SMPT_CMD_OPCODE_SHIFT			8
#define SMPT_CMD_OPCODE(_cmd) \
	(((_cmd) & SMPT_CMD_OPCODE_MASK) >> SMPT_CMD_OPCODE_SHIFT)

#define SMPT_MAP_REGION_COUNT_MASK		GENMASK(23, 16)
#define SMPT_MAP_REGION_COUNT_SHIFT		16
#define SMPT_MAP_REGION_COUNT(_header) \
	((((_header) & SMPT_MAP_REGION_COUNT_MASK) >> \
	  SMPT_MAP_REGION_COUNT_SHIFT) + 1)

#define SMPT_MAP_ID_MASK			GENMASK(15, 8)
#define SMPT_MAP_ID_SHIFT			8
#define SMPT_MAP_ID(_header) \
	(((_header) & SMPT_MAP_ID_MASK) >> SMPT_MAP_ID_SHIFT)

#define SMPT_MAP_REGION_SIZE_MASK		GENMASK(31, 8)
#define SMPT_MAP_REGION_SIZE_SHIFT		8
#define SMPT_MAP_REGION_SIZE(_region) \
	(((((_region) & SMPT_MAP_REGION_SIZE_MASK) >> \
	   SMPT_MAP_REGION_SIZE_SHIFT) + 1) * 256)

#define SMPT_MAP_REGION_ERASE_TYPE_MASK		GENMASK(3, 0)
#define SMPT_MAP_REGION_ERASE_TYPE(_region) \
	((_region) & SMPT_MAP_REGION_ERASE_TYPE_MASK)
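
/*
 * Illustrative decode of a map region DWORD (hypothetical value, not taken
 * from a real SMPT): for a region DWORD of 0x0000ff04,
 * SMPT_MAP_REGION_SIZE() yields (0xff + 1) * 256 = 64 KiB and
 * SMPT_MAP_REGION_ERASE_TYPE() yields 0x4, i.e. only BFPT Erase Type 3
 * (BIT(2)) is usable in that region.
 */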

#define SMPT_DESC_TYPE_MAP		BIT(1)
#define SMPT_DESC_END			BIT(0)

#define SFDP_4BAIT_DWORD_MAX	2

struct sfdp_4bait {
	/* The hardware capability. */
	u32		hwcaps;

	/*
	 * The <supported_bit> bit in DWORD1 of the 4BAIT tells us whether
	 * the associated 4-byte address op code is supported.
	 */
	u32		supported_bit;
};

/**
 * spi_nor_read_raw() - raw read of serial flash memory. read_opcode,
 *			addr_width and read_dummy members of the struct spi_nor
 *			should be previously set.
 * @nor:	pointer to a 'struct spi_nor'
 * @addr:	offset in the serial flash memory
 * @len:	number of bytes to read
 * @buf:	buffer where the data is copied into (dma-safe memory)
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_read_raw(struct spi_nor *nor, u32 addr, size_t len, u8 *buf)
{
	ssize_t ret;

	while (len) {
		ret = spi_nor_read_data(nor, addr, len, buf);
		if (ret < 0)
			return ret;
		if (!ret || ret > len)
			return -EIO;

		buf += ret;
		addr += ret;
		len -= ret;
	}
	return 0;
}

/**
 * spi_nor_read_sfdp() - read Serial Flash Discoverable Parameters.
 * @nor:	pointer to a 'struct spi_nor'
 * @addr:	offset in the SFDP area to start reading data from
 * @len:	number of bytes to read
 * @buf:	buffer where the SFDP data are copied into (dma-safe memory)
 *
 * Whatever the actual numbers of bytes for address and dummy cycles are
 * for (Fast) Read commands, the Read SFDP (5Ah) instruction is always
 * followed by a 3-byte address and 8 dummy clock cycles.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_read_sfdp(struct spi_nor *nor, u32 addr,
			     size_t len, void *buf)
{
	u8 addr_width, read_opcode, read_dummy;
	int ret;

	read_opcode = nor->read_opcode;
	addr_width = nor->addr_width;
	read_dummy = nor->read_dummy;

	nor->read_opcode = SPINOR_OP_RDSFDP;
	nor->addr_width = 3;
	nor->read_dummy = 8;

	ret = spi_nor_read_raw(nor, addr, len, buf);

	nor->read_opcode = read_opcode;
	nor->addr_width = addr_width;
	nor->read_dummy = read_dummy;

	return ret;
}

/**
 * spi_nor_read_sfdp_dma_unsafe() - read Serial Flash Discoverable Parameters.
 * @nor:	pointer to a 'struct spi_nor'
 * @addr:	offset in the SFDP area to start reading data from
 * @len:	number of bytes to read
 * @buf:	buffer where the SFDP data are copied into
 *
 * Wrap spi_nor_read_sfdp() using a kmalloc'ed bounce buffer as @buf is not
 * guaranteed to be dma-safe.
 *
 * Return: -ENOMEM if kmalloc() fails, the return code of spi_nor_read_sfdp()
 *	   otherwise.
 */
static int spi_nor_read_sfdp_dma_unsafe(struct spi_nor *nor, u32 addr,
					size_t len, void *buf)
{
	void *dma_safe_buf;
	int ret;

	dma_safe_buf = kmalloc(len, GFP_KERNEL);
	if (!dma_safe_buf)
		return -ENOMEM;

	ret = spi_nor_read_sfdp(nor, addr, len, dma_safe_buf);
	memcpy(buf, dma_safe_buf, len);
	kfree(dma_safe_buf);

	return ret;
}

static void
spi_nor_set_read_settings_from_bfpt(struct spi_nor_read_command *read,
				    u16 half,
				    enum spi_nor_protocol proto)
{
	read->num_mode_clocks = (half >> 5) & 0x07;
	read->num_wait_states = (half >> 0) & 0x1f;
	read->opcode = (half >> 8) & 0xff;
	read->proto = proto;
}
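
/*
 * Illustrative decode (hypothetical half-word, not taken from a real BFPT):
 * half = 0xeb44 would yield opcode 0xeb (bits [15:8]), 2 mode clocks
 * (bits [7:5]) and 4 wait states (bits [4:0]).
 */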

static const struct sfdp_bfpt_read sfdp_bfpt_reads[] = {
	/* Fast Read 1-1-2 */
	{
		SNOR_HWCAPS_READ_1_1_2,
		BFPT_DWORD(1), BIT(16),	/* Supported bit */
		BFPT_DWORD(4), 0,	/* Settings */
		SNOR_PROTO_1_1_2,
	},

	/* Fast Read 1-2-2 */
	{
		SNOR_HWCAPS_READ_1_2_2,
		BFPT_DWORD(1), BIT(20),	/* Supported bit */
		BFPT_DWORD(4), 16,	/* Settings */
		SNOR_PROTO_1_2_2,
	},

	/* Fast Read 2-2-2 */
	{
		SNOR_HWCAPS_READ_2_2_2,
		BFPT_DWORD(5), BIT(0),	/* Supported bit */
		BFPT_DWORD(6), 16,	/* Settings */
		SNOR_PROTO_2_2_2,
	},

	/* Fast Read 1-1-4 */
	{
		SNOR_HWCAPS_READ_1_1_4,
		BFPT_DWORD(1), BIT(22),	/* Supported bit */
		BFPT_DWORD(3), 16,	/* Settings */
		SNOR_PROTO_1_1_4,
	},

	/* Fast Read 1-4-4 */
	{
		SNOR_HWCAPS_READ_1_4_4,
		BFPT_DWORD(1), BIT(21),	/* Supported bit */
		BFPT_DWORD(3), 0,	/* Settings */
		SNOR_PROTO_1_4_4,
	},

	/* Fast Read 4-4-4 */
	{
		SNOR_HWCAPS_READ_4_4_4,
		BFPT_DWORD(5), BIT(4),	/* Supported bit */
		BFPT_DWORD(7), 16,	/* Settings */
		SNOR_PROTO_4_4_4,
	},
};

static const struct sfdp_bfpt_erase sfdp_bfpt_erases[] = {
	/* Erase Type 1 in DWORD8 bits[15:0] */
	{BFPT_DWORD(8), 0},

	/* Erase Type 2 in DWORD8 bits[31:16] */
	{BFPT_DWORD(8), 16},

	/* Erase Type 3 in DWORD9 bits[15:0] */
	{BFPT_DWORD(9), 0},

	/* Erase Type 4 in DWORD9 bits[31:16] */
	{BFPT_DWORD(9), 16},
};

/**
 * spi_nor_set_erase_settings_from_bfpt() - set erase type settings from BFPT
 * @erase:	pointer to a structure that describes a SPI NOR erase type
 * @size:	the size of the sector/block erased by the erase type
 * @opcode:	the SPI command op code to erase the sector/block
 * @i:		erase type index as sorted in the Basic Flash Parameter Table
 *
 * The supported Erase Types will be sorted at init in ascending order, with
 * the smallest Erase Type size being the first member in the erase_type array
 * of the spi_nor_erase_map structure. Save the Erase Type index as sorted in
 * the Basic Flash Parameter Table since it will be used later on to
 * synchronize with the supported Erase Types defined in SFDP optional tables.
 */
static void
spi_nor_set_erase_settings_from_bfpt(struct spi_nor_erase_type *erase,
				     u32 size, u8 opcode, u8 i)
{
	erase->idx = i;
	spi_nor_set_erase_type(erase, size, opcode);
}

/**
 * spi_nor_map_cmp_erase_type() - compare the map's erase types by size
 * @l:	member in the left half of the map's erase_type array
 * @r:	member in the right half of the map's erase_type array
 *
 * Comparison function used in the sort() call to sort in ascending order the
 * map's erase types, the smallest erase type size being the first member in
 * the sorted erase_type array.
 *
 * Return: the result of @l->size - @r->size
 */
static int spi_nor_map_cmp_erase_type(const void *l, const void *r)
{
	const struct spi_nor_erase_type *left = l, *right = r;

	return left->size - right->size;
}

/**
 * spi_nor_sort_erase_mask() - sort erase mask
 * @map:	the erase map of the SPI NOR
 * @erase_mask:	the erase type mask to be sorted
 *
 * Replicate the sort done for the map's erase types in BFPT: sort the erase
 * mask in ascending order with the smallest erase type size starting from
 * BIT(0) in the sorted erase mask.
 *
 * Return: sorted erase mask.
 */
static u8 spi_nor_sort_erase_mask(struct spi_nor_erase_map *map, u8 erase_mask)
{
	struct spi_nor_erase_type *erase_type = map->erase_type;
	int i;
	u8 sorted_erase_mask = 0;

	if (!erase_mask)
		return 0;

	/* Replicate the sort done for the map's erase types. */
	for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
		if (erase_type[i].size && erase_mask & BIT(erase_type[i].idx))
			sorted_erase_mask |= BIT(i);

	return sorted_erase_mask;
}
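
/*
 * Illustrative example (hypothetical erase sizes): if the BFPT declared
 * Erase Type 1 as 64 KiB and Erase Type 2 as 4 KiB, the sorted erase_type
 * array holds the 4 KiB type at index 0 (.idx = 1) and the 64 KiB type at
 * index 1 (.idx = 0), so a BFPT-ordered mask of BIT(0) (64 KiB only) is
 * remapped to BIT(1) in the sorted mask.
 */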

/**
 * spi_nor_regions_sort_erase_types() - sort erase types in each region
 * @map:	the erase map of the SPI NOR
 *
 * Function assumes that the erase types defined in the erase map are already
 * sorted in ascending order, with the smallest erase type size being the first
 * member in the erase_type array. It replicates the sort done for the map's
 * erase types. Each region's erase bitmask will indicate which erase types are
 * supported from the sorted erase types defined in the erase map.
 * Sort all the regions' erase types at init in order to speed up the process
 * of finding the best erase command at runtime.
 */
static void spi_nor_regions_sort_erase_types(struct spi_nor_erase_map *map)
{
	struct spi_nor_erase_region *region = map->regions;
	u8 region_erase_mask, sorted_erase_mask;

	while (region) {
		region_erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;

		sorted_erase_mask = spi_nor_sort_erase_mask(map,
							    region_erase_mask);

		/* Overwrite erase mask. */
		region->offset = (region->offset & ~SNOR_ERASE_TYPE_MASK) |
				 sorted_erase_mask;

		region = spi_nor_region_next(region);
	}
}

/**
 * spi_nor_parse_bfpt() - read and parse the Basic Flash Parameter Table.
 * @nor:		pointer to a 'struct spi_nor'
 * @bfpt_header:	pointer to the 'struct sfdp_parameter_header' describing
 *			the Basic Flash Parameter Table length and version
 *
 * The Basic Flash Parameter Table is the main and only mandatory table as
 * defined by the SFDP (JESD216) specification.
 * It provides us with the total size (memory density) of the data array and
 * the number of address bytes for Fast Read, Page Program and Sector Erase
 * commands.
 * For Fast READ commands, it also gives the number of mode clock cycles and
 * wait states (regrouped in the number of dummy clock cycles) for each
 * supported instruction op code.
 * For Page Program, the page size is now available since JESD216 rev A;
 * however, the supported instruction op codes are still not provided.
 * For Sector Erase commands, this table stores the supported instruction op
 * codes and the associated sector sizes.
 * Finally, the Quad Enable Requirements (QER) are also available since JESD216
 * rev A. The QER bits encode the manufacturer dependent procedure to be
 * executed to set the Quad Enable (QE) bit in some internal register of the
 * Quad SPI memory. Indeed the QE bit, when it exists, must be set before
 * sending any Quad SPI command to the memory. Actually, setting the QE bit
 * tells the memory to reassign its WP# and HOLD#/RESET# pins to functions IO2
 * and IO3, hence enabling 4 (Quad) I/O lines.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_parse_bfpt(struct spi_nor *nor,
			      const struct sfdp_parameter_header *bfpt_header)
{
	struct spi_nor_flash_parameter *params = nor->params;
	struct spi_nor_erase_map *map = &params->erase_map;
	struct spi_nor_erase_type *erase_type = map->erase_type;
	struct sfdp_bfpt bfpt;
	size_t len;
	int i, cmd, err;
	u32 addr, val;
	u16 half;
	u8 erase_mask;

	/* JESD216 Basic Flash Parameter Table length is at least 9 DWORDs. */
	if (bfpt_header->length < BFPT_DWORD_MAX_JESD216)
		return -EINVAL;

	/* Read the Basic Flash Parameter Table. */
	len = min_t(size_t, sizeof(bfpt),
		    bfpt_header->length * sizeof(u32));
	addr = SFDP_PARAM_HEADER_PTP(bfpt_header);
	memset(&bfpt, 0, sizeof(bfpt));
	err = spi_nor_read_sfdp_dma_unsafe(nor, addr, len, &bfpt);
	if (err < 0)
		return err;

	/* Fix endianness of the BFPT DWORDs. */
	le32_to_cpu_array(bfpt.dwords, BFPT_DWORD_MAX);

	/* Number of address bytes. */
	switch (bfpt.dwords[BFPT_DWORD(1)] & BFPT_DWORD1_ADDRESS_BYTES_MASK) {
	case BFPT_DWORD1_ADDRESS_BYTES_3_ONLY:
	case BFPT_DWORD1_ADDRESS_BYTES_3_OR_4:
		nor->addr_width = 3;
		break;

	case BFPT_DWORD1_ADDRESS_BYTES_4_ONLY:
		nor->addr_width = 4;
		break;

	default:
		break;
	}

	/* Flash Memory Density (in bits). */
	val = bfpt.dwords[BFPT_DWORD(2)];
	if (val & BIT(31)) {
		val &= ~BIT(31);

		/*
		 * Prevent overflows on params->size. Anyway, a NOR of 2^64
		 * bits is unlikely to exist so this error probably means
		 * the BFPT we are reading is corrupted/wrong.
		 */
		if (val > 63)
			return -EINVAL;

		params->size = 1ULL << val;
	} else {
		params->size = val + 1;
	}
	params->size >>= 3; /* Convert to bytes. */
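	/*
	 * Illustrative decode (hypothetical DWORD2 values): 0x01ffffff
	 * encodes the density directly as 0x01ffffff + 1 = 32 Mibit,
	 * i.e. a 4 MiB part, while 0x80000021 (BIT(31) set, N = 0x21)
	 * encodes 2^33 bits = 1 GiB.
	 */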

	/* Fast Read settings. */
	for (i = 0; i < ARRAY_SIZE(sfdp_bfpt_reads); i++) {
		const struct sfdp_bfpt_read *rd = &sfdp_bfpt_reads[i];
		struct spi_nor_read_command *read;

		if (!(bfpt.dwords[rd->supported_dword] & rd->supported_bit)) {
			params->hwcaps.mask &= ~rd->hwcaps;
			continue;
		}

		params->hwcaps.mask |= rd->hwcaps;
		cmd = spi_nor_hwcaps_read2cmd(rd->hwcaps);
		read = &params->reads[cmd];
		half = bfpt.dwords[rd->settings_dword] >> rd->settings_shift;
		spi_nor_set_read_settings_from_bfpt(read, half, rd->proto);
	}

	/*
	 * Sector Erase settings. Reinitialize the uniform erase map using the
	 * Erase Types defined in the bfpt table.
	 */
	erase_mask = 0;
	memset(&params->erase_map, 0, sizeof(params->erase_map));
	for (i = 0; i < ARRAY_SIZE(sfdp_bfpt_erases); i++) {
		const struct sfdp_bfpt_erase *er = &sfdp_bfpt_erases[i];
		u32 erasesize;
		u8 opcode;

		half = bfpt.dwords[er->dword] >> er->shift;
		erasesize = half & 0xff;

		/* erasesize == 0 means this Erase Type is not supported. */
		if (!erasesize)
			continue;

		erasesize = 1U << erasesize;
		opcode = (half >> 8) & 0xff;
		erase_mask |= BIT(i);
		spi_nor_set_erase_settings_from_bfpt(&erase_type[i], erasesize,
						     opcode, i);
	}
	spi_nor_init_uniform_erase_map(map, erase_mask, params->size);
	/*
	 * Sort all the map's Erase Types in ascending order with the smallest
	 * erase size being the first member in the erase_type array.
	 */
	sort(erase_type, SNOR_ERASE_TYPE_MAX, sizeof(erase_type[0]),
	     spi_nor_map_cmp_erase_type, NULL);
	/*
	 * Sort the erase types in the uniform region in order to update the
	 * uniform_erase_type bitmask. The bitmask will be used later on when
	 * selecting the uniform erase.
	 */
	spi_nor_regions_sort_erase_types(map);
	map->uniform_erase_type = map->uniform_region.offset &
				  SNOR_ERASE_TYPE_MASK;

	/* Stop here if not JESD216 rev A or later. */
	if (bfpt_header->length == BFPT_DWORD_MAX_JESD216)
		return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt);

	/* Page size: this field specifies 'N' so the page size = 2^N bytes. */
	val = bfpt.dwords[BFPT_DWORD(11)];
	val &= BFPT_DWORD11_PAGE_SIZE_MASK;
	val >>= BFPT_DWORD11_PAGE_SIZE_SHIFT;
	params->page_size = 1U << val;

	/* Quad Enable Requirements. */
	switch (bfpt.dwords[BFPT_DWORD(15)] & BFPT_DWORD15_QER_MASK) {
	case BFPT_DWORD15_QER_NONE:
		params->quad_enable = NULL;
		break;

	case BFPT_DWORD15_QER_SR2_BIT1_BUGGY:
		/*
		 * Writing only one byte to the Status Register has the
		 * side-effect of clearing Status Register 2.
		 */
	case BFPT_DWORD15_QER_SR2_BIT1_NO_RD:
		/*
		 * Read Configuration Register (35h) instruction is not
		 * supported.
		 */
		nor->flags |= SNOR_F_HAS_16BIT_SR | SNOR_F_NO_READ_CR;
		params->quad_enable = spi_nor_sr2_bit1_quad_enable;
		break;

	case BFPT_DWORD15_QER_SR1_BIT6:
		nor->flags &= ~SNOR_F_HAS_16BIT_SR;
		params->quad_enable = spi_nor_sr1_bit6_quad_enable;
		break;

	case BFPT_DWORD15_QER_SR2_BIT7:
		nor->flags &= ~SNOR_F_HAS_16BIT_SR;
		params->quad_enable = spi_nor_sr2_bit7_quad_enable;
		break;

	case BFPT_DWORD15_QER_SR2_BIT1:
		/*
		 * JESD216 rev B or later does not specify whether writing only
		 * one byte to the Status Register clears the Status Register 2
		 * or not, so let's be cautious and keep the default assumption
		 * of a 16-bit Write Status (01h) command.
		 */
		nor->flags |= SNOR_F_HAS_16BIT_SR;

		params->quad_enable = spi_nor_sr2_bit1_quad_enable;
		break;

	default:
		dev_dbg(nor->dev, "BFPT QER reserved value used\n");
		break;
	}

	/* Soft Reset support. */
	if (bfpt.dwords[BFPT_DWORD(16)] & BFPT_DWORD16_SWRST_EN_RST)
		nor->flags |= SNOR_F_SOFT_RESET;

	/* Stop here if not JESD216 rev C or later. */
	if (bfpt_header->length == BFPT_DWORD_MAX_JESD216B)
		return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt);

	/* 8D-8D-8D command extension. */
	switch (bfpt.dwords[BFPT_DWORD(18)] & BFPT_DWORD18_CMD_EXT_MASK) {
	case BFPT_DWORD18_CMD_EXT_REP:
		nor->cmd_ext_type = SPI_NOR_EXT_REPEAT;
		break;

	case BFPT_DWORD18_CMD_EXT_INV:
		nor->cmd_ext_type = SPI_NOR_EXT_INVERT;
		break;

	case BFPT_DWORD18_CMD_EXT_RES:
		dev_dbg(nor->dev, "Reserved command extension used\n");
		break;

	case BFPT_DWORD18_CMD_EXT_16B:
		dev_dbg(nor->dev, "16-bit opcodes not supported\n");
		return -EOPNOTSUPP;
	}

	return spi_nor_post_bfpt_fixups(nor, bfpt_header, &bfpt);
}

/**
 * spi_nor_smpt_addr_width() - return the address width used in the
 *			       configuration detection command.
 * @nor:	pointer to a 'struct spi_nor'
 * @settings:	configuration detection command descriptor, dword1
 */
static u8 spi_nor_smpt_addr_width(const struct spi_nor *nor, const u32 settings)
{
	switch (settings & SMPT_CMD_ADDRESS_LEN_MASK) {
	case SMPT_CMD_ADDRESS_LEN_0:
		return 0;
	case SMPT_CMD_ADDRESS_LEN_3:
		return 3;
	case SMPT_CMD_ADDRESS_LEN_4:
		return 4;
	case SMPT_CMD_ADDRESS_LEN_USE_CURRENT:
	default:
		return nor->addr_width;
	}
}

/**
 * spi_nor_smpt_read_dummy() - return the configuration detection command read
 *			       latency, in clock cycles.
 * @nor:	pointer to a 'struct spi_nor'
 * @settings:	configuration detection command descriptor, dword1
 *
 * Return: the number of dummy cycles for an SMPT read
 */
static u8 spi_nor_smpt_read_dummy(const struct spi_nor *nor, const u32 settings)
{
	u8 read_dummy = SMPT_CMD_READ_DUMMY(settings);

	if (read_dummy == SMPT_CMD_READ_DUMMY_IS_VARIABLE)
		return nor->read_dummy;
	return read_dummy;
}

/**
 * spi_nor_get_map_in_use() - get the configuration map in use
 * @nor:	pointer to a 'struct spi_nor'
 * @smpt:	pointer to the sector map parameter table
 * @smpt_len:	sector map parameter table length
 *
 * Return: pointer to the map in use, ERR_PTR(-errno) otherwise.
 */
static const u32 *spi_nor_get_map_in_use(struct spi_nor *nor, const u32 *smpt,
					 u8 smpt_len)
{
	const u32 *ret;
	u8 *buf;
	u32 addr;
	int err;
	u8 i;
	u8 addr_width, read_opcode, read_dummy;
	u8 read_data_mask, map_id;

	/* Use a kmalloc'ed bounce buffer to guarantee it is DMA-able. */
	buf = kmalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	addr_width = nor->addr_width;
	read_dummy = nor->read_dummy;
	read_opcode = nor->read_opcode;

	map_id = 0;
	/* Determine if there are any optional Detection Command Descriptors */
	for (i = 0; i < smpt_len; i += 2) {
		if (smpt[i] & SMPT_DESC_TYPE_MAP)
			break;

		read_data_mask = SMPT_CMD_READ_DATA(smpt[i]);
		nor->addr_width = spi_nor_smpt_addr_width(nor, smpt[i]);
		nor->read_dummy = spi_nor_smpt_read_dummy(nor, smpt[i]);
		nor->read_opcode = SMPT_CMD_OPCODE(smpt[i]);
		addr = smpt[i + 1];

		err = spi_nor_read_raw(nor, addr, 1, buf);
		if (err) {
			ret = ERR_PTR(err);
			goto out;
		}

		/*
		 * Build an index value that is used to select the Sector Map
		 * Configuration that is currently in use.
		 */
		map_id = map_id << 1 | !!(*buf & read_data_mask);
	}
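
	/*
	 * Illustrative example (hypothetical register reads): with two
	 * detection commands whose probed bits read back as 1 and 0, the
	 * loop above builds map_id = (1 << 1) | 0 = 2, which is then
	 * matched against SMPT_MAP_ID() of the map descriptors below.
	 */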

	/*
	 * If command descriptors are provided, they always precede map
	 * descriptors in the table. There is no need to start the iteration
	 * over smpt array all over again.
	 *
	 * Find the matching configuration map.
	 */
	ret = ERR_PTR(-EINVAL);
	while (i < smpt_len) {
		if (SMPT_MAP_ID(smpt[i]) == map_id) {
			ret = smpt + i;
			break;
		}

		/*
		 * If there are no more configuration map descriptors and no
		 * configuration ID matched the configuration identifier, the
		 * sector address map is unknown.
		 */
		if (smpt[i] & SMPT_DESC_END)
			break;

		/* increment the table index to the next map */
		i += SMPT_MAP_REGION_COUNT(smpt[i]) + 1;
	}

	/* fall through */
out:
	kfree(buf);
	nor->addr_width = addr_width;
	nor->read_dummy = read_dummy;
	nor->read_opcode = read_opcode;
	return ret;
}

static void spi_nor_region_mark_end(struct spi_nor_erase_region *region)
{
	region->offset |= SNOR_LAST_REGION;
}

static void spi_nor_region_mark_overlay(struct spi_nor_erase_region *region)
{
	region->offset |= SNOR_OVERLAID_REGION;
}

/**
 * spi_nor_region_check_overlay() - set overlay bit when the region is overlaid
 * @region:	pointer to a structure that describes a SPI NOR erase region
 * @erase:	pointer to a structure that describes a SPI NOR erase type
 * @erase_type:	erase type bitmask
 */
static void
spi_nor_region_check_overlay(struct spi_nor_erase_region *region,
			     const struct spi_nor_erase_type *erase,
			     const u8 erase_type)
{
	int i;

	for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
		if (!(erase[i].size && erase_type & BIT(erase[i].idx)))
			continue;
		if (region->size & erase[i].size_mask) {
			spi_nor_region_mark_overlay(region);
			return;
		}
	}
}

/**
 * spi_nor_init_non_uniform_erase_map() - initialize the non-uniform erase map
 * @nor:	pointer to a 'struct spi_nor'
 * @smpt:	pointer to the sector map parameter table
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_init_non_uniform_erase_map(struct spi_nor *nor,
					      const u32 *smpt)
{
	struct spi_nor_erase_map *map = &nor->params->erase_map;
	struct spi_nor_erase_type *erase = map->erase_type;
	struct spi_nor_erase_region *region;
	u64 offset;
	u32 region_count;
	int i, j;
	u8 uniform_erase_type, save_uniform_erase_type;
	u8 erase_type, regions_erase_type;

	region_count = SMPT_MAP_REGION_COUNT(*smpt);
	/*
	 * The regions will be freed when the driver detaches from the
	 * device.
	 */
	region = devm_kcalloc(nor->dev, region_count, sizeof(*region),
			      GFP_KERNEL);
	if (!region)
		return -ENOMEM;
	map->regions = region;

	uniform_erase_type = 0xff;
	regions_erase_type = 0;
	offset = 0;
	/* Populate regions. */
	for (i = 0; i < region_count; i++) {
		j = i + 1; /* index for the region dword */
		region[i].size = SMPT_MAP_REGION_SIZE(smpt[j]);
		erase_type = SMPT_MAP_REGION_ERASE_TYPE(smpt[j]);
		region[i].offset = offset | erase_type;

		spi_nor_region_check_overlay(&region[i], erase, erase_type);

		/*
		 * Save the erase types that are supported in all regions and
		 * can erase the entire flash memory.
		 */
		uniform_erase_type &= erase_type;

		/*
		 * regions_erase_type mask will indicate all the erase types
		 * supported in this configuration map.
		 */
		regions_erase_type |= erase_type;

		offset = (region[i].offset & ~SNOR_ERASE_FLAGS_MASK) +
			 region[i].size;
	}
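
	/*
	 * Illustrative example (hypothetical map): a region supporting
	 * Erase Types {1, 3} (mask 0x5) followed by one supporting only
	 * Erase Type 3 (mask 0x4) leaves uniform_erase_type = 0x5 & 0x4 = 0x4
	 * and regions_erase_type = 0x5 | 0x4 = 0x5, i.e. only Erase Type 3
	 * can erase the whole array.
	 */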
	spi_nor_region_mark_end(&region[i - 1]);

	save_uniform_erase_type = map->uniform_erase_type;
	map->uniform_erase_type = spi_nor_sort_erase_mask(map,
							  uniform_erase_type);

	if (!regions_erase_type) {
		/*
		 * Roll back to the previous uniform_erase_type mask, SMPT is
		 * broken.
		 */
		map->uniform_erase_type = save_uniform_erase_type;
		return -EINVAL;
	}

	/*
	 * BFPT advertises all the erase types supported by all the possible
	 * map configurations. Mask out the erase types that are not supported
	 * by the current map configuration.
	 */
	for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
		if (!(regions_erase_type & BIT(erase[i].idx)))
			spi_nor_set_erase_type(&erase[i], 0, 0xFF);

	return 0;
}

/**
 * spi_nor_parse_smpt() - parse Sector Map Parameter Table
 * @nor:		pointer to a 'struct spi_nor'
 * @smpt_header:	sector map parameter table header
 *
 * This table is optional, but when available, we parse it to identify the
 * location and size of sectors within the main data array of the flash memory
 * device and to identify which Erase Types are supported by each sector.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_parse_smpt(struct spi_nor *nor,
			      const struct sfdp_parameter_header *smpt_header)
{
	const u32 *sector_map;
	u32 *smpt;
	size_t len;
	u32 addr;
	int ret;

	/* Read the Sector Map Parameter Table. */
	len = smpt_header->length * sizeof(*smpt);
	smpt = kmalloc(len, GFP_KERNEL);
	if (!smpt)
		return -ENOMEM;

	addr = SFDP_PARAM_HEADER_PTP(smpt_header);
	ret = spi_nor_read_sfdp(nor, addr, len, smpt);
	if (ret)
		goto out;

	/* Fix endianness of the SMPT DWORDs. */
	le32_to_cpu_array(smpt, smpt_header->length);

	sector_map = spi_nor_get_map_in_use(nor, smpt, smpt_header->length);
	if (IS_ERR(sector_map)) {
		ret = PTR_ERR(sector_map);
		goto out;
	}

	ret = spi_nor_init_non_uniform_erase_map(nor, sector_map);
	if (ret)
		goto out;

	spi_nor_regions_sort_erase_types(&nor->params->erase_map);
	/* fall through */
out:
	kfree(smpt);
	return ret;
}

/**
 * spi_nor_parse_4bait() - parse the 4-Byte Address Instruction Table
 * @nor:		pointer to a 'struct spi_nor'.
 * @param_header:	pointer to the 'struct sfdp_parameter_header' describing
 *			the 4-Byte Address Instruction Table length and version.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_parse_4bait(struct spi_nor *nor,
			       const struct sfdp_parameter_header *param_header)
{
	static const struct sfdp_4bait reads[] = {
		{ SNOR_HWCAPS_READ,		BIT(0) },
		{ SNOR_HWCAPS_READ_FAST,	BIT(1) },
		{ SNOR_HWCAPS_READ_1_1_2,	BIT(2) },
		{ SNOR_HWCAPS_READ_1_2_2,	BIT(3) },
		{ SNOR_HWCAPS_READ_1_1_4,	BIT(4) },
		{ SNOR_HWCAPS_READ_1_4_4,	BIT(5) },
		{ SNOR_HWCAPS_READ_1_1_1_DTR,	BIT(13) },
		{ SNOR_HWCAPS_READ_1_2_2_DTR,	BIT(14) },
		{ SNOR_HWCAPS_READ_1_4_4_DTR,	BIT(15) },
	};
	static const struct sfdp_4bait programs[] = {
		{ SNOR_HWCAPS_PP,		BIT(6) },
		{ SNOR_HWCAPS_PP_1_1_4,		BIT(7) },
		{ SNOR_HWCAPS_PP_1_4_4,		BIT(8) },
	};
	static const struct sfdp_4bait erases[SNOR_ERASE_TYPE_MAX] = {
		{ 0u /* not used */,		BIT(9) },
		{ 0u /* not used */,		BIT(10) },
		{ 0u /* not used */,		BIT(11) },
		{ 0u /* not used */,		BIT(12) },
	};
	struct spi_nor_flash_parameter *params = nor->params;
	struct spi_nor_pp_command *params_pp = params->page_programs;
	struct spi_nor_erase_map *map = &params->erase_map;
	struct spi_nor_erase_type *erase_type = map->erase_type;
	u32 *dwords;
	size_t len;
	u32 addr, discard_hwcaps, read_hwcaps, pp_hwcaps, erase_mask;
	int i, ret;

	if (param_header->major != SFDP_JESD216_MAJOR ||
	    param_header->length < SFDP_4BAIT_DWORD_MAX)
		return -EINVAL;

	/* Read the 4-byte Address Instruction Table. */
	len = sizeof(*dwords) * SFDP_4BAIT_DWORD_MAX;

	/* Use a kmalloc'ed bounce buffer to guarantee it is DMA-able. */
	dwords = kmalloc(len, GFP_KERNEL);
	if (!dwords)
		return -ENOMEM;

	addr = SFDP_PARAM_HEADER_PTP(param_header);
	ret = spi_nor_read_sfdp(nor, addr, len, dwords);
	if (ret)
		goto out;

	/* Fix endianness of the 4BAIT DWORDs. */
	le32_to_cpu_array(dwords, SFDP_4BAIT_DWORD_MAX);

	/*
	 * Compute the subset of (Fast) Read commands for which the 4-byte
	 * version is supported.
	 */
	discard_hwcaps = 0;
	read_hwcaps = 0;
	for (i = 0; i < ARRAY_SIZE(reads); i++) {
		const struct sfdp_4bait *read = &reads[i];

		discard_hwcaps |= read->hwcaps;
		if ((params->hwcaps.mask & read->hwcaps) &&
		    (dwords[0] & read->supported_bit))
			read_hwcaps |= read->hwcaps;
	}

	/*
	 * Compute the subset of Page Program commands for which the 4-byte
	 * version is supported.
	 */
	pp_hwcaps = 0;
	for (i = 0; i < ARRAY_SIZE(programs); i++) {
		const struct sfdp_4bait *program = &programs[i];

		/*
		 * The 4 Byte Address Instruction (Optional) Table is the only
		 * SFDP table that indicates support for Page Program Commands.
		 * Bypass the params->hwcaps.mask and consider 4BAIT the
		 * biggest authority for specifying Page Program support.
		 */
		discard_hwcaps |= program->hwcaps;
		if (dwords[0] & program->supported_bit)
			pp_hwcaps |= program->hwcaps;
	}

	/*
	 * Compute the subset of Sector Erase commands for which the 4-byte
	 * version is supported.
	 */
	erase_mask = 0;
	for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
		const struct sfdp_4bait *erase = &erases[i];

		if (dwords[0] & erase->supported_bit)
			erase_mask |= BIT(i);
	}

	/* Replicate the sort done for the map's erase types in BFPT. */
	erase_mask = spi_nor_sort_erase_mask(map, erase_mask);

	/*
	 * We need at least one 4-byte op code per read, program and erase
	 * operation; the .read(), .write() and .erase() hooks share the
	 * nor->addr_width value.
	 */
	if (!read_hwcaps || !pp_hwcaps || !erase_mask)
		goto out;

	/*
	 * Discard all operations from the 4-byte instruction set which are
	 * not supported by this memory.
	 */
	params->hwcaps.mask &= ~discard_hwcaps;
	params->hwcaps.mask |= (read_hwcaps | pp_hwcaps);

	/* Use the 4-byte address instruction set. */
	for (i = 0; i < SNOR_CMD_READ_MAX; i++) {
		struct spi_nor_read_command *read_cmd = &params->reads[i];

		read_cmd->opcode = spi_nor_convert_3to4_read(read_cmd->opcode);
	}

	/* 4BAIT is the only SFDP table that indicates page program support. */
	if (pp_hwcaps & SNOR_HWCAPS_PP) {
		spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP],
					SPINOR_OP_PP_4B, SNOR_PROTO_1_1_1);
		/*
		 * Since xSPI Page Program opcode is backward compatible with
		 * Legacy SPI, use Legacy SPI opcode there as well.
		 */
		spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP_8_8_8_DTR],
					SPINOR_OP_PP_4B, SNOR_PROTO_8_8_8_DTR);
	}
	if (pp_hwcaps & SNOR_HWCAPS_PP_1_1_4)
		spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP_1_1_4],
					SPINOR_OP_PP_1_1_4_4B,
					SNOR_PROTO_1_1_4);
	if (pp_hwcaps & SNOR_HWCAPS_PP_1_4_4)
		spi_nor_set_pp_settings(&params_pp[SNOR_CMD_PP_1_4_4],
					SPINOR_OP_PP_1_4_4_4B,
					SNOR_PROTO_1_4_4);

	for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
		if (erase_mask & BIT(i))
			erase_type[i].opcode = (dwords[1] >>
						erase_type[i].idx * 8) & 0xFF;
		else
			spi_nor_set_erase_type(&erase_type[i], 0u, 0xFF);
	}
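
	/*
	 * Illustrative decode (hypothetical DWORD2 value): with
	 * dwords[1] = 0xdc000021, an erase type whose BFPT index (idx) is 0
	 * would get the 4-byte opcode 0x21 (byte 0) and one whose idx is 3
	 * would get 0xdc (byte 3).
	 */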

	/*
	 * We set SNOR_F_HAS_4BAIT in order to skip spi_nor_set_4byte_opcodes()
	 * later because we already did the conversion to 4byte opcodes. Also,
	 * this latest function implements a legacy quirk for the erase size of
	 * Spansion memory. However this quirk is no longer needed with new
	 * SFDP compliant memories.
	 */
	nor->addr_width = 4;
	nor->flags |= SNOR_F_4B_OPCODES | SNOR_F_HAS_4BAIT;

	/* fall through */
out:
	kfree(dwords);
	return ret;
}

#define PROFILE1_DWORD1_RDSR_ADDR_BYTES		BIT(29)
#define PROFILE1_DWORD1_RDSR_DUMMY		BIT(28)
#define PROFILE1_DWORD1_RD_FAST_CMD		GENMASK(15, 8)
#define PROFILE1_DWORD4_DUMMY_200MHZ		GENMASK(11, 7)
#define PROFILE1_DWORD5_DUMMY_166MHZ		GENMASK(31, 27)
#define PROFILE1_DWORD5_DUMMY_133MHZ		GENMASK(21, 17)
#define PROFILE1_DWORD5_DUMMY_100MHZ		GENMASK(11, 7)

/**
 * spi_nor_parse_profile1() - parse the xSPI Profile 1.0 table
 * @nor:		pointer to a 'struct spi_nor'
 * @profile1_header:	pointer to the 'struct sfdp_parameter_header' describing
 *			the Profile 1.0 Table length and version.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_parse_profile1(struct spi_nor *nor,
				  const struct sfdp_parameter_header *profile1_header)
{
	u32 *dwords, addr;
	size_t len;
	int ret;
	u8 dummy, opcode;

	len = profile1_header->length * sizeof(*dwords);
	dwords = kmalloc(len, GFP_KERNEL);
	if (!dwords)
		return -ENOMEM;

	addr = SFDP_PARAM_HEADER_PTP(profile1_header);
	ret = spi_nor_read_sfdp(nor, addr, len, dwords);
	if (ret)
		goto out;

	le32_to_cpu_array(dwords, profile1_header->length);

	/* Get 8D-8D-8D fast read opcode and dummy cycles. */
	opcode = FIELD_GET(PROFILE1_DWORD1_RD_FAST_CMD, dwords[0]);

	/* Set the Read Status Register dummy cycles and dummy address bytes. */
	if (dwords[0] & PROFILE1_DWORD1_RDSR_DUMMY)
		nor->params->rdsr_dummy = 8;
	else
		nor->params->rdsr_dummy = 4;

	if (dwords[0] & PROFILE1_DWORD1_RDSR_ADDR_BYTES)
		nor->params->rdsr_addr_nbytes = 4;
	else
		nor->params->rdsr_addr_nbytes = 0;

	/*
	 * We don't know what speed the controller is running at. Find the
	 * dummy cycles for the fastest frequency the flash can run at to be
	 * sure we are never short of dummy cycles. A value of 0 means the
	 * frequency is not supported.
	 *
	 * Default to PROFILE1_DUMMY_DEFAULT if we don't find anything, and let
	 * flashes set the correct value if needed in their fixup hooks.
	 */
	dummy = FIELD_GET(PROFILE1_DWORD4_DUMMY_200MHZ, dwords[3]);
	if (!dummy)
		dummy = FIELD_GET(PROFILE1_DWORD5_DUMMY_166MHZ, dwords[4]);
	if (!dummy)
		dummy = FIELD_GET(PROFILE1_DWORD5_DUMMY_133MHZ, dwords[4]);
	if (!dummy)
		dummy = FIELD_GET(PROFILE1_DWORD5_DUMMY_100MHZ, dwords[4]);
	if (!dummy)
		dev_dbg(nor->dev,
			"Can't find dummy cycles from Profile 1.0 table\n");

	/* Round up to an even value to avoid tripping controllers up. */
	dummy = round_up(dummy, 2);

	/* Update the fast read settings. */
	spi_nor_set_read_settings(&nor->params->reads[SNOR_CMD_READ_8_8_8_DTR],
				  0, dummy, opcode,
				  SNOR_PROTO_8_8_8_DTR);

out:
	kfree(dwords);
	return ret;
}

#define SCCR_DWORD22_OCTAL_DTR_EN_VOLATILE	BIT(31)

/**
 * spi_nor_parse_sccr() - Parse the Status, Control and Configuration Register
 *			  Map.
 * @nor:		pointer to a 'struct spi_nor'
 * @sccr_header:	pointer to the 'struct sfdp_parameter_header' describing
 *			the SCCR Map table length and version.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_parse_sccr(struct spi_nor *nor,
			      const struct sfdp_parameter_header *sccr_header)
{
	u32 *dwords, addr;
	size_t len;
	int ret;

	len = sccr_header->length * sizeof(*dwords);
	dwords = kmalloc(len, GFP_KERNEL);
	if (!dwords)
		return -ENOMEM;

	addr = SFDP_PARAM_HEADER_PTP(sccr_header);
	ret = spi_nor_read_sfdp(nor, addr, len, dwords);
	if (ret)
		goto out;

	le32_to_cpu_array(dwords, sccr_header->length);

	if (FIELD_GET(SCCR_DWORD22_OCTAL_DTR_EN_VOLATILE, dwords[22]))
		nor->flags |= SNOR_F_IO_MODE_EN_VOLATILE;

out:
	kfree(dwords);
	return ret;
}

/**
 * spi_nor_parse_sfdp() - parse the Serial Flash Discoverable Parameters.
 * @nor:	pointer to a 'struct spi_nor'
 *
 * The Serial Flash Discoverable Parameters are described by the JEDEC JESD216
 * specification. This is a standard which tends to be supported by almost all
 * (Q)SPI memory manufacturers. Those hard-coded tables allow us to learn at
 * runtime the main parameters needed to perform basic SPI flash operations
 * such as Fast Read, Page Program or Sector Erase commands.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_parse_sfdp(struct spi_nor *nor)
{
	const struct sfdp_parameter_header *param_header, *bfpt_header;
	struct sfdp_parameter_header *param_headers = NULL;
	struct sfdp_header header;
	struct device *dev = nor->dev;
	size_t psize;
	int i, err;

	/* Get the SFDP header. */
	err = spi_nor_read_sfdp_dma_unsafe(nor, 0, sizeof(header), &header);
	if (err < 0)
		return err;

	/* Check the SFDP header version. */
	if (le32_to_cpu(header.signature) != SFDP_SIGNATURE ||
	    header.major != SFDP_JESD216_MAJOR)
		return -EINVAL;

	/*
	 * Verify that the first and only mandatory parameter header is a
	 * Basic Flash Parameter Table header as specified in JESD216.
	 */
	bfpt_header = &header.bfpt_header;
	if (SFDP_PARAM_HEADER_ID(bfpt_header) != SFDP_BFPT_ID ||
	    bfpt_header->major != SFDP_JESD216_MAJOR)
		return -EINVAL;

	/*
	 * Allocate memory then read all parameter headers with a single
	 * Read SFDP command. These parameter headers will actually be parsed
	 * twice: a first time to get the latest revision of the basic flash
	 * parameter table, then a second time to handle the supported optional
	 * tables.
	 * Hence we read the parameter headers once for all to reduce the
	 * processing time. Also we use kmalloc() instead of devm_kmalloc()
	 * because we don't need to keep these parameter headers: the allocated
	 * memory is always released with kfree() before exiting this function.
	 */
	if (header.nph) {
		psize = header.nph * sizeof(*param_headers);

		param_headers = kmalloc(psize, GFP_KERNEL);
		if (!param_headers)
			return -ENOMEM;

		err = spi_nor_read_sfdp(nor, sizeof(header),
					psize, param_headers);
		if (err < 0) {
			dev_dbg(dev, "failed to read SFDP parameter headers\n");
			goto exit;
		}
	}

	/*
	 * Check other parameter headers to get the latest revision of
	 * the basic flash parameter table.
	 */
	for (i = 0; i < header.nph; i++) {
		param_header = &param_headers[i];

		if (SFDP_PARAM_HEADER_ID(param_header) == SFDP_BFPT_ID &&
		    param_header->major == SFDP_JESD216_MAJOR &&
		    (param_header->minor > bfpt_header->minor ||
		     (param_header->minor == bfpt_header->minor &&
		      param_header->length > bfpt_header->length)))
			bfpt_header = param_header;
	}

	err = spi_nor_parse_bfpt(nor, bfpt_header);
	if (err)
		goto exit;

	/* Parse optional parameter tables. */
	for (i = 0; i < header.nph; i++) {
		param_header = &param_headers[i];

		switch (SFDP_PARAM_HEADER_ID(param_header)) {
		case SFDP_SECTOR_MAP_ID:
			err = spi_nor_parse_smpt(nor, param_header);
			break;

		case SFDP_4BAIT_ID:
			err = spi_nor_parse_4bait(nor, param_header);
			break;

		case SFDP_PROFILE1_ID:
			err = spi_nor_parse_profile1(nor, param_header);
			break;

		case SFDP_SCCR_MAP_ID:
			err = spi_nor_parse_sccr(nor, param_header);
			break;

		default:
			break;
		}

		if (err) {
			dev_warn(dev, "Failed to parse optional parameter table: %04x\n",
				 SFDP_PARAM_HEADER_ID(param_header));
			/*
			 * Let's not drop all the information we extracted so
			 * far if an optional table parser fails. In case of
			 * failure, each optional parser is responsible for
			 * rolling back to the previously known spi_nor data.
			 */
			err = 0;
		}
	}

exit:
	kfree(param_headers);
	return err;
}