1 /*- 2 * SPDX-License-Identifier: BSD-3-Clause 3 * 4 * Copyright (c) 2007, Juniper Networks, Inc. 5 * Copyright (c) 2012-2013, SRI International 6 * All rights reserved. 7 * 8 * Portions of this software were developed by SRI International and the 9 * University of Cambridge Computer Laboratory under DARPA/AFRL contract 10 * (FA8750-10-C-0237) ("CTSRD"), as part of the DARPA CRASH research 11 * programme. 12 * 13 * Redistribution and use in source and binary forms, with or without 14 * modification, are permitted provided that the following conditions 15 * are met: 16 * 1. Redistributions of source code must retain the above copyright 17 * notice, this list of conditions and the following disclaimer. 18 * 2. Redistributions in binary form must reproduce the above copyright 19 * notice, this list of conditions and the following disclaimer in the 20 * documentation and/or other materials provided with the distribution. 21 * 3. Neither the name of the author nor the names of any co-contributors 22 * may be used to endorse or promote products derived from this software 23 * without specific prior written permission. 24 * 25 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 26 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 27 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 28 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 29 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 30 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 31 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 32 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 33 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 35 * SUCH DAMAGE. 36 */ 37 38 #include <sys/cdefs.h> 39 __FBSDID("$FreeBSD$"); 40 41 #include "opt_cfi.h" 42 43 #include <sys/param.h> 44 #include <sys/systm.h> 45 #include <sys/bus.h> 46 #include <sys/conf.h> 47 #include <sys/endian.h> 48 #include <sys/kenv.h> 49 #include <sys/kernel.h> 50 #include <sys/malloc.h> 51 #include <sys/module.h> 52 #include <sys/rman.h> 53 #include <sys/sysctl.h> 54 55 #include <machine/bus.h> 56 57 #include <dev/cfi/cfi_reg.h> 58 #include <dev/cfi/cfi_var.h> 59 60 static void cfi_add_sysctls(struct cfi_softc *); 61 62 extern struct cdevsw cfi_cdevsw; 63 64 char cfi_driver_name[] = "cfi"; 65 devclass_t cfi_devclass; 66 devclass_t cfi_diskclass; 67 68 uint32_t 69 cfi_read_raw(struct cfi_softc *sc, u_int ofs) 70 { 71 uint32_t val; 72 73 ofs &= ~(sc->sc_width - 1); 74 switch (sc->sc_width) { 75 case 1: 76 val = bus_space_read_1(sc->sc_tag, sc->sc_handle, ofs); 77 break; 78 case 2: 79 val = bus_space_read_2(sc->sc_tag, sc->sc_handle, ofs); 80 break; 81 case 4: 82 val = bus_space_read_4(sc->sc_tag, sc->sc_handle, ofs); 83 break; 84 default: 85 val = ~0; 86 break; 87 } 88 return (val); 89 } 90 91 uint32_t 92 cfi_read(struct cfi_softc *sc, u_int ofs) 93 { 94 uint32_t val; 95 uint16_t sval; 96 97 ofs &= ~(sc->sc_width - 1); 98 switch (sc->sc_width) { 99 case 1: 100 val = bus_space_read_1(sc->sc_tag, sc->sc_handle, ofs); 101 break; 102 case 2: 103 sval = bus_space_read_2(sc->sc_tag, sc->sc_handle, ofs); 104 #ifdef CFI_HARDWAREBYTESWAP 105 val = sval; 106 #else 107 val = le16toh(sval); 108 #endif 109 break; 110 case 4: 111 val = bus_space_read_4(sc->sc_tag, sc->sc_handle, ofs); 112 #ifndef CFI_HARDWAREBYTESWAP 113 val = le32toh(val); 114 #endif 115 break; 116 default: 117 val = ~0; 118 break; 119 } 120 return (val); 121 } 122 123 static void 124 cfi_write(struct cfi_softc *sc, u_int ofs, u_int val) 125 { 126 127 ofs &= ~(sc->sc_width - 1); 128 switch (sc->sc_width) { 129 case 1: 130 bus_space_write_1(sc->sc_tag, sc->sc_handle, ofs, val); 131 break; 132 case 2: 133 #ifdef CFI_HARDWAREBYTESWAP 134 bus_space_write_2(sc->sc_tag, sc->sc_handle, ofs, val); 135 #else 136 bus_space_write_2(sc->sc_tag, sc->sc_handle, ofs, htole16(val)); 137 138 #endif 139 break; 140 case 4: 141 #ifdef CFI_HARDWAREBYTESWAP 142 bus_space_write_4(sc->sc_tag, sc->sc_handle, ofs, val); 143 #else 144 bus_space_write_4(sc->sc_tag, sc->sc_handle, ofs, htole32(val)); 145 #endif 146 break; 147 } 148 } 149 150 /* 151 * This is same workaound as NetBSD sys/dev/nor/cfi.c cfi_reset_default() 152 */ 153 static void 154 cfi_reset_default(struct cfi_softc *sc) 155 { 156 157 cfi_write(sc, 0, CFI_BCS_READ_ARRAY2); 158 cfi_write(sc, 0, CFI_BCS_READ_ARRAY); 159 } 160 161 uint8_t 162 cfi_read_qry(struct cfi_softc *sc, u_int ofs) 163 { 164 uint8_t val; 165 166 cfi_write(sc, CFI_QRY_CMD_ADDR * sc->sc_width, CFI_QRY_CMD_DATA); 167 val = cfi_read(sc, ofs * sc->sc_width); 168 cfi_reset_default(sc); 169 return (val); 170 } 171 172 static void 173 cfi_amd_write(struct cfi_softc *sc, u_int ofs, u_int addr, u_int data) 174 { 175 176 cfi_write(sc, ofs + AMD_ADDR_START, CFI_AMD_UNLOCK); 177 cfi_write(sc, ofs + AMD_ADDR_ACK, CFI_AMD_UNLOCK_ACK); 178 cfi_write(sc, ofs + addr, data); 179 } 180 181 static char * 182 cfi_fmtsize(uint32_t sz) 183 { 184 static char buf[8]; 185 static const char *sfx[] = { "", "K", "M", "G" }; 186 int sfxidx; 187 188 sfxidx = 0; 189 while (sfxidx < 3 && sz > 1023) { 190 sz /= 1024; 191 sfxidx++; 192 } 193 194 sprintf(buf, "%u%sB", sz, sfx[sfxidx]); 195 return (buf); 196 } 197 198 int 199 cfi_probe(device_t dev) 200 { 201 char desc[80]; 202 struct cfi_softc *sc; 203 char *vend_str; 204 int error; 205 uint16_t iface, vend; 206 207 sc = device_get_softc(dev); 208 sc->sc_dev = dev; 209 210 sc->sc_rid = 0; 211 sc->sc_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rid, 212 RF_ACTIVE); 213 if (sc->sc_res == NULL) 214 return (ENXIO); 215 216 sc->sc_tag = rman_get_bustag(sc->sc_res); 217 sc->sc_handle = rman_get_bushandle(sc->sc_res); 218 219 if (sc->sc_width == 0) { 220 sc->sc_width = 1; 221 while (sc->sc_width <= 4) { 222 if (cfi_read_qry(sc, CFI_QRY_IDENT) == 'Q') 223 break; 224 sc->sc_width <<= 1; 225 } 226 } else if (cfi_read_qry(sc, CFI_QRY_IDENT) != 'Q') { 227 error = ENXIO; 228 goto out; 229 } 230 if (sc->sc_width > 4) { 231 error = ENXIO; 232 goto out; 233 } 234 235 /* We got a Q. Check if we also have the R and the Y. */ 236 if (cfi_read_qry(sc, CFI_QRY_IDENT + 1) != 'R' || 237 cfi_read_qry(sc, CFI_QRY_IDENT + 2) != 'Y') { 238 error = ENXIO; 239 goto out; 240 } 241 242 /* Get the vendor and command set. */ 243 vend = cfi_read_qry(sc, CFI_QRY_VEND) | 244 (cfi_read_qry(sc, CFI_QRY_VEND + 1) << 8); 245 246 sc->sc_cmdset = vend; 247 248 switch (vend) { 249 case CFI_VEND_AMD_ECS: 250 case CFI_VEND_AMD_SCS: 251 vend_str = "AMD/Fujitsu"; 252 break; 253 case CFI_VEND_INTEL_ECS: 254 vend_str = "Intel/Sharp"; 255 break; 256 case CFI_VEND_INTEL_SCS: 257 vend_str = "Intel"; 258 break; 259 case CFI_VEND_MITSUBISHI_ECS: 260 case CFI_VEND_MITSUBISHI_SCS: 261 vend_str = "Mitsubishi"; 262 break; 263 default: 264 vend_str = "Unknown vendor"; 265 break; 266 } 267 268 /* Get the device size. */ 269 sc->sc_size = 1U << cfi_read_qry(sc, CFI_QRY_SIZE); 270 271 /* Sanity-check the I/F */ 272 iface = cfi_read_qry(sc, CFI_QRY_IFACE) | 273 (cfi_read_qry(sc, CFI_QRY_IFACE + 1) << 8); 274 275 /* 276 * Adding 1 to iface will give us a bit-wise "switch" 277 * that allows us to test for the interface width by 278 * testing a single bit. 279 */ 280 iface++; 281 282 error = (iface & sc->sc_width) ? 0 : EINVAL; 283 if (error) 284 goto out; 285 286 snprintf(desc, sizeof(desc), "%s - %s", vend_str, 287 cfi_fmtsize(sc->sc_size)); 288 device_set_desc_copy(dev, desc); 289 290 out: 291 bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rid, sc->sc_res); 292 return (error); 293 } 294 295 int 296 cfi_attach(device_t dev) 297 { 298 struct cfi_softc *sc; 299 u_int blksz, blocks; 300 u_int r, u; 301 uint64_t mtoexp, ttoexp; 302 #ifdef CFI_SUPPORT_STRATAFLASH 303 uint64_t ppr; 304 char name[KENV_MNAMELEN], value[32]; 305 #endif 306 307 sc = device_get_softc(dev); 308 sc->sc_dev = dev; 309 310 sc->sc_rid = 0; 311 sc->sc_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rid, 312 #ifndef ATSE_CFI_HACK 313 RF_ACTIVE); 314 #else 315 RF_ACTIVE | RF_SHAREABLE); 316 #endif 317 if (sc->sc_res == NULL) 318 return (ENXIO); 319 320 sc->sc_tag = rman_get_bustag(sc->sc_res); 321 sc->sc_handle = rman_get_bushandle(sc->sc_res); 322 323 /* Get time-out values for erase, write, and buffer write. */ 324 ttoexp = cfi_read_qry(sc, CFI_QRY_TTO_ERASE); 325 mtoexp = cfi_read_qry(sc, CFI_QRY_MTO_ERASE); 326 if (ttoexp == 0) { 327 device_printf(dev, "erase timeout == 0, using 2^16ms\n"); 328 ttoexp = 16; 329 } 330 if (ttoexp > 41) { 331 device_printf(dev, "insane timeout: 2^%jdms\n", ttoexp); 332 return (EINVAL); 333 } 334 if (mtoexp == 0) { 335 device_printf(dev, "max erase timeout == 0, using 2^%jdms\n", 336 ttoexp + 4); 337 mtoexp = 4; 338 } 339 if (ttoexp + mtoexp > 41) { 340 device_printf(dev, "insane max erase timeout: 2^%jd\n", 341 ttoexp + mtoexp); 342 return (EINVAL); 343 } 344 sc->sc_typical_timeouts[CFI_TIMEOUT_ERASE] = SBT_1MS * (1ULL << ttoexp); 345 sc->sc_max_timeouts[CFI_TIMEOUT_ERASE] = 346 sc->sc_typical_timeouts[CFI_TIMEOUT_ERASE] * (1ULL << mtoexp); 347 348 ttoexp = cfi_read_qry(sc, CFI_QRY_TTO_WRITE); 349 mtoexp = cfi_read_qry(sc, CFI_QRY_MTO_WRITE); 350 if (ttoexp == 0) { 351 device_printf(dev, "write timeout == 0, using 2^18ns\n"); 352 ttoexp = 18; 353 } 354 if (ttoexp > 51) { 355 device_printf(dev, "insane write timeout: 2^%jdus\n", ttoexp); 356 return (EINVAL); 357 } 358 if (mtoexp == 0) { 359 device_printf(dev, "max write timeout == 0, using 2^%jdms\n", 360 ttoexp + 4); 361 mtoexp = 4; 362 } 363 if (ttoexp + mtoexp > 51) { 364 device_printf(dev, "insane max write timeout: 2^%jdus\n", 365 ttoexp + mtoexp); 366 return (EINVAL); 367 } 368 sc->sc_typical_timeouts[CFI_TIMEOUT_WRITE] = SBT_1US * (1ULL << ttoexp); 369 sc->sc_max_timeouts[CFI_TIMEOUT_WRITE] = 370 sc->sc_typical_timeouts[CFI_TIMEOUT_WRITE] * (1ULL << mtoexp); 371 372 ttoexp = cfi_read_qry(sc, CFI_QRY_TTO_BUFWRITE); 373 mtoexp = cfi_read_qry(sc, CFI_QRY_MTO_BUFWRITE); 374 /* Don't check for 0, it means not-supported. */ 375 if (ttoexp > 51) { 376 device_printf(dev, "insane write timeout: 2^%jdus\n", ttoexp); 377 return (EINVAL); 378 } 379 if (ttoexp + mtoexp > 51) { 380 device_printf(dev, "insane max write timeout: 2^%jdus\n", 381 ttoexp + mtoexp); 382 return (EINVAL); 383 } 384 sc->sc_typical_timeouts[CFI_TIMEOUT_BUFWRITE] = 385 SBT_1US * (1ULL << cfi_read_qry(sc, CFI_QRY_TTO_BUFWRITE)); 386 sc->sc_max_timeouts[CFI_TIMEOUT_BUFWRITE] = 387 sc->sc_typical_timeouts[CFI_TIMEOUT_BUFWRITE] * 388 (1ULL << cfi_read_qry(sc, CFI_QRY_MTO_BUFWRITE)); 389 390 /* Get the maximum size of a multibyte program */ 391 if (sc->sc_typical_timeouts[CFI_TIMEOUT_BUFWRITE] != 0) 392 sc->sc_maxbuf = 1 << (cfi_read_qry(sc, CFI_QRY_MAXBUF) | 393 cfi_read_qry(sc, CFI_QRY_MAXBUF) << 8); 394 else 395 sc->sc_maxbuf = 0; 396 397 /* Get erase regions. */ 398 sc->sc_regions = cfi_read_qry(sc, CFI_QRY_NREGIONS); 399 sc->sc_region = malloc(sc->sc_regions * sizeof(struct cfi_region), 400 M_TEMP, M_WAITOK | M_ZERO); 401 for (r = 0; r < sc->sc_regions; r++) { 402 blocks = cfi_read_qry(sc, CFI_QRY_REGION(r)) | 403 (cfi_read_qry(sc, CFI_QRY_REGION(r) + 1) << 8); 404 sc->sc_region[r].r_blocks = blocks + 1; 405 406 blksz = cfi_read_qry(sc, CFI_QRY_REGION(r) + 2) | 407 (cfi_read_qry(sc, CFI_QRY_REGION(r) + 3) << 8); 408 sc->sc_region[r].r_blksz = (blksz == 0) ? 128 : 409 blksz * 256; 410 } 411 412 /* Reset the device to a default state. */ 413 cfi_write(sc, 0, CFI_BCS_CLEAR_STATUS); 414 415 if (bootverbose) { 416 device_printf(dev, "["); 417 for (r = 0; r < sc->sc_regions; r++) { 418 printf("%ux%s%s", sc->sc_region[r].r_blocks, 419 cfi_fmtsize(sc->sc_region[r].r_blksz), 420 (r == sc->sc_regions - 1) ? "]\n" : ","); 421 } 422 } 423 424 u = device_get_unit(dev); 425 sc->sc_nod = make_dev(&cfi_cdevsw, u, UID_ROOT, GID_WHEEL, 0600, 426 "%s%u", cfi_driver_name, u); 427 sc->sc_nod->si_drv1 = sc; 428 429 cfi_add_sysctls(sc); 430 431 #ifdef CFI_SUPPORT_STRATAFLASH 432 /* 433 * Store the Intel factory PPR in the environment. In some 434 * cases it is the most unique ID on a board. 435 */ 436 if (cfi_intel_get_factory_pr(sc, &ppr) == 0) { 437 if (snprintf(name, sizeof(name), "%s.factory_ppr", 438 device_get_nameunit(dev)) < (sizeof(name) - 1) && 439 snprintf(value, sizeof(value), "0x%016jx", ppr) < 440 (sizeof(value) - 1)) 441 (void) kern_setenv(name, value); 442 } 443 #endif 444 445 device_add_child(dev, "cfid", -1); 446 bus_generic_attach(dev); 447 448 return (0); 449 } 450 451 static void 452 cfi_add_sysctls(struct cfi_softc *sc) 453 { 454 struct sysctl_ctx_list *ctx; 455 struct sysctl_oid_list *children; 456 457 ctx = device_get_sysctl_ctx(sc->sc_dev); 458 children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->sc_dev)); 459 460 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 461 "typical_erase_timout_count", 462 CTLFLAG_RD, &sc->sc_tto_counts[CFI_TIMEOUT_ERASE], 463 0, "Number of times the typical erase timeout was exceeded"); 464 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 465 "max_erase_timout_count", 466 CTLFLAG_RD, &sc->sc_mto_counts[CFI_TIMEOUT_ERASE], 0, 467 "Number of times the maximum erase timeout was exceeded"); 468 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 469 "typical_write_timout_count", 470 CTLFLAG_RD, &sc->sc_tto_counts[CFI_TIMEOUT_WRITE], 0, 471 "Number of times the typical write timeout was exceeded"); 472 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 473 "max_write_timout_count", 474 CTLFLAG_RD, &sc->sc_mto_counts[CFI_TIMEOUT_WRITE], 0, 475 "Number of times the maximum write timeout was exceeded"); 476 if (sc->sc_maxbuf > 0) { 477 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 478 "typical_bufwrite_timout_count", 479 CTLFLAG_RD, &sc->sc_tto_counts[CFI_TIMEOUT_BUFWRITE], 0, 480 "Number of times the typical buffered write timeout was " 481 "exceeded"); 482 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, 483 "max_bufwrite_timout_count", 484 CTLFLAG_RD, &sc->sc_mto_counts[CFI_TIMEOUT_BUFWRITE], 0, 485 "Number of times the maximum buffered write timeout was " 486 "exceeded"); 487 } 488 } 489 490 int 491 cfi_detach(device_t dev) 492 { 493 struct cfi_softc *sc; 494 495 sc = device_get_softc(dev); 496 497 destroy_dev(sc->sc_nod); 498 free(sc->sc_region, M_TEMP); 499 bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rid, sc->sc_res); 500 return (0); 501 } 502 503 static int 504 cfi_wait_ready(struct cfi_softc *sc, u_int ofs, sbintime_t start, 505 enum cfi_wait_cmd cmd) 506 { 507 int done, error, tto_exceeded; 508 uint32_t st0 = 0, st = 0; 509 sbintime_t now; 510 511 done = 0; 512 error = 0; 513 tto_exceeded = 0; 514 while (!done && !error) { 515 /* 516 * Save time before we start so we always do one check 517 * after the timeout has expired. 518 */ 519 now = sbinuptime(); 520 521 switch (sc->sc_cmdset) { 522 case CFI_VEND_INTEL_ECS: 523 case CFI_VEND_INTEL_SCS: 524 st = cfi_read(sc, ofs); 525 done = (st & CFI_INTEL_STATUS_WSMS); 526 if (done) { 527 /* NB: bit 0 is reserved */ 528 st &= ~(CFI_INTEL_XSTATUS_RSVD | 529 CFI_INTEL_STATUS_WSMS | 530 CFI_INTEL_STATUS_RSVD); 531 if (st & CFI_INTEL_STATUS_DPS) 532 error = EPERM; 533 else if (st & CFI_INTEL_STATUS_PSLBS) 534 error = EIO; 535 else if (st & CFI_INTEL_STATUS_ECLBS) 536 error = ENXIO; 537 else if (st) 538 error = EACCES; 539 } 540 break; 541 case CFI_VEND_AMD_SCS: 542 case CFI_VEND_AMD_ECS: 543 st0 = cfi_read(sc, ofs); 544 st = cfi_read(sc, ofs); 545 done = ((st & 0x40) == (st0 & 0x40)) ? 1 : 0; 546 break; 547 } 548 549 if (tto_exceeded || 550 now > start + sc->sc_typical_timeouts[cmd]) { 551 if (!tto_exceeded) { 552 tto_exceeded = 1; 553 sc->sc_tto_counts[cmd]++; 554 #ifdef CFI_DEBUG_TIMEOUT 555 device_printf(sc->sc_dev, 556 "typical timeout exceeded (cmd %d)", cmd); 557 #endif 558 } 559 if (now > start + sc->sc_max_timeouts[cmd]) { 560 sc->sc_mto_counts[cmd]++; 561 #ifdef CFI_DEBUG_TIMEOUT 562 device_printf(sc->sc_dev, 563 "max timeout exceeded (cmd %d)", cmd); 564 #endif 565 } 566 } 567 } 568 if (!done && !error) 569 error = ETIMEDOUT; 570 if (error) 571 printf("\nerror=%d (st 0x%x st0 0x%x)\n", error, st, st0); 572 return (error); 573 } 574 575 int 576 cfi_write_block(struct cfi_softc *sc) 577 { 578 union { 579 uint8_t *x8; 580 uint16_t *x16; 581 uint32_t *x32; 582 } ptr, cpyprt; 583 register_t intr; 584 int error, i, neederase = 0; 585 uint32_t st; 586 u_int wlen; 587 sbintime_t start; 588 589 /* Intel flash must be unlocked before modification */ 590 switch (sc->sc_cmdset) { 591 case CFI_VEND_INTEL_ECS: 592 case CFI_VEND_INTEL_SCS: 593 cfi_write(sc, sc->sc_wrofs, CFI_INTEL_LBS); 594 cfi_write(sc, sc->sc_wrofs, CFI_INTEL_UB); 595 cfi_write(sc, sc->sc_wrofs, CFI_BCS_READ_ARRAY); 596 break; 597 } 598 599 /* Check if an erase is required. */ 600 for (i = 0; i < sc->sc_wrbufsz; i++) 601 if ((sc->sc_wrbuf[i] & sc->sc_wrbufcpy[i]) != sc->sc_wrbuf[i]) { 602 neederase = 1; 603 break; 604 } 605 606 if (neederase) { 607 intr = intr_disable(); 608 start = sbinuptime(); 609 /* Erase the block. */ 610 switch (sc->sc_cmdset) { 611 case CFI_VEND_INTEL_ECS: 612 case CFI_VEND_INTEL_SCS: 613 cfi_write(sc, sc->sc_wrofs, CFI_BCS_BLOCK_ERASE); 614 cfi_write(sc, sc->sc_wrofs, CFI_BCS_CONFIRM); 615 break; 616 case CFI_VEND_AMD_SCS: 617 case CFI_VEND_AMD_ECS: 618 cfi_amd_write(sc, sc->sc_wrofs, AMD_ADDR_START, 619 CFI_AMD_ERASE_SECTOR); 620 cfi_amd_write(sc, sc->sc_wrofs, 0, CFI_AMD_BLOCK_ERASE); 621 break; 622 default: 623 /* Better safe than sorry... */ 624 intr_restore(intr); 625 return (ENODEV); 626 } 627 intr_restore(intr); 628 error = cfi_wait_ready(sc, sc->sc_wrofs, start, 629 CFI_TIMEOUT_ERASE); 630 if (error) 631 goto out; 632 } else 633 error = 0; 634 635 /* Write the block using a multibyte write if supported. */ 636 ptr.x8 = sc->sc_wrbuf; 637 cpyprt.x8 = sc->sc_wrbufcpy; 638 if (sc->sc_maxbuf > sc->sc_width) { 639 switch (sc->sc_cmdset) { 640 case CFI_VEND_INTEL_ECS: 641 case CFI_VEND_INTEL_SCS: 642 for (i = 0; i < sc->sc_wrbufsz; i += wlen) { 643 wlen = MIN(sc->sc_maxbuf, sc->sc_wrbufsz - i); 644 645 intr = intr_disable(); 646 647 start = sbinuptime(); 648 do { 649 cfi_write(sc, sc->sc_wrofs + i, 650 CFI_BCS_BUF_PROG_SETUP); 651 if (sbinuptime() > start + sc->sc_max_timeouts[CFI_TIMEOUT_BUFWRITE]) { 652 error = ETIMEDOUT; 653 goto out; 654 } 655 st = cfi_read(sc, sc->sc_wrofs + i); 656 } while (! (st & CFI_INTEL_STATUS_WSMS)); 657 658 cfi_write(sc, sc->sc_wrofs + i, 659 (wlen / sc->sc_width) - 1); 660 switch (sc->sc_width) { 661 case 1: 662 bus_space_write_region_1(sc->sc_tag, 663 sc->sc_handle, sc->sc_wrofs + i, 664 ptr.x8 + i, wlen); 665 break; 666 case 2: 667 bus_space_write_region_2(sc->sc_tag, 668 sc->sc_handle, sc->sc_wrofs + i, 669 ptr.x16 + i / 2, wlen / 2); 670 break; 671 case 4: 672 bus_space_write_region_4(sc->sc_tag, 673 sc->sc_handle, sc->sc_wrofs + i, 674 ptr.x32 + i / 4, wlen / 4); 675 break; 676 } 677 678 cfi_write(sc, sc->sc_wrofs + i, 679 CFI_BCS_CONFIRM); 680 681 intr_restore(intr); 682 683 error = cfi_wait_ready(sc, sc->sc_wrofs + i, 684 start, CFI_TIMEOUT_BUFWRITE); 685 if (error != 0) 686 goto out; 687 } 688 goto out; 689 default: 690 /* Fall through to single word case */ 691 break; 692 } 693 694 } 695 696 /* Write the block one byte/word at a time. */ 697 for (i = 0; i < sc->sc_wrbufsz; i += sc->sc_width) { 698 699 /* Avoid writing unless we are actually changing bits */ 700 if (!neederase) { 701 switch (sc->sc_width) { 702 case 1: 703 if(*(ptr.x8 + i) == *(cpyprt.x8 + i)) 704 continue; 705 break; 706 case 2: 707 if(*(ptr.x16 + i / 2) == *(cpyprt.x16 + i / 2)) 708 continue; 709 break; 710 case 4: 711 if(*(ptr.x32 + i / 4) == *(cpyprt.x32 + i / 4)) 712 continue; 713 break; 714 } 715 } 716 717 /* 718 * Make sure the command to start a write and the 719 * actual write happens back-to-back without any 720 * excessive delays. 721 */ 722 intr = intr_disable(); 723 724 start = sbinuptime(); 725 switch (sc->sc_cmdset) { 726 case CFI_VEND_INTEL_ECS: 727 case CFI_VEND_INTEL_SCS: 728 cfi_write(sc, sc->sc_wrofs + i, CFI_BCS_PROGRAM); 729 break; 730 case CFI_VEND_AMD_SCS: 731 case CFI_VEND_AMD_ECS: 732 cfi_amd_write(sc, 0, AMD_ADDR_START, CFI_AMD_PROGRAM); 733 break; 734 } 735 switch (sc->sc_width) { 736 case 1: 737 bus_space_write_1(sc->sc_tag, sc->sc_handle, 738 sc->sc_wrofs + i, *(ptr.x8 + i)); 739 break; 740 case 2: 741 bus_space_write_2(sc->sc_tag, sc->sc_handle, 742 sc->sc_wrofs + i, *(ptr.x16 + i / 2)); 743 break; 744 case 4: 745 bus_space_write_4(sc->sc_tag, sc->sc_handle, 746 sc->sc_wrofs + i, *(ptr.x32 + i / 4)); 747 break; 748 } 749 750 intr_restore(intr); 751 752 error = cfi_wait_ready(sc, sc->sc_wrofs, start, 753 CFI_TIMEOUT_WRITE); 754 if (error) 755 goto out; 756 } 757 758 /* error is 0. */ 759 760 out: 761 cfi_reset_default(sc); 762 763 /* Relock Intel flash */ 764 switch (sc->sc_cmdset) { 765 case CFI_VEND_INTEL_ECS: 766 case CFI_VEND_INTEL_SCS: 767 cfi_write(sc, sc->sc_wrofs, CFI_INTEL_LBS); 768 cfi_write(sc, sc->sc_wrofs, CFI_INTEL_LB); 769 cfi_write(sc, sc->sc_wrofs, CFI_BCS_READ_ARRAY); 770 break; 771 } 772 return (error); 773 } 774 775 #ifdef CFI_SUPPORT_STRATAFLASH 776 /* 777 * Intel StrataFlash Protection Register Support. 778 * 779 * The memory includes a 128-bit Protection Register that can be 780 * used for security. There are two 64-bit segments; one is programmed 781 * at the factory with a unique 64-bit number which is immutable. 782 * The other segment is left blank for User (OEM) programming. 783 * The User/OEM segment is One Time Programmable (OTP). It can also 784 * be locked to prevent any further writes by setting bit 0 of the 785 * Protection Lock Register (PLR). The PLR can written only once. 786 */ 787 788 static uint16_t 789 cfi_get16(struct cfi_softc *sc, int off) 790 { 791 uint16_t v = bus_space_read_2(sc->sc_tag, sc->sc_handle, off<<1); 792 return v; 793 } 794 795 #ifdef CFI_ARMEDANDDANGEROUS 796 static void 797 cfi_put16(struct cfi_softc *sc, int off, uint16_t v) 798 { 799 bus_space_write_2(sc->sc_tag, sc->sc_handle, off<<1, v); 800 } 801 #endif 802 803 /* 804 * Read the factory-defined 64-bit segment of the PR. 805 */ 806 int 807 cfi_intel_get_factory_pr(struct cfi_softc *sc, uint64_t *id) 808 { 809 if (sc->sc_cmdset != CFI_VEND_INTEL_ECS) 810 return EOPNOTSUPP; 811 KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width)); 812 813 cfi_write(sc, 0, CFI_INTEL_READ_ID); 814 *id = ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(0)))<<48 | 815 ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(1)))<<32 | 816 ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(2)))<<16 | 817 ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(3))); 818 cfi_write(sc, 0, CFI_BCS_READ_ARRAY); 819 return 0; 820 } 821 822 /* 823 * Read the User/OEM 64-bit segment of the PR. 824 */ 825 int 826 cfi_intel_get_oem_pr(struct cfi_softc *sc, uint64_t *id) 827 { 828 if (sc->sc_cmdset != CFI_VEND_INTEL_ECS) 829 return EOPNOTSUPP; 830 KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width)); 831 832 cfi_write(sc, 0, CFI_INTEL_READ_ID); 833 *id = ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(4)))<<48 | 834 ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(5)))<<32 | 835 ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(6)))<<16 | 836 ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(7))); 837 cfi_write(sc, 0, CFI_BCS_READ_ARRAY); 838 return 0; 839 } 840 841 /* 842 * Write the User/OEM 64-bit segment of the PR. 843 * XXX should allow writing individual words/bytes 844 */ 845 int 846 cfi_intel_set_oem_pr(struct cfi_softc *sc, uint64_t id) 847 { 848 #ifdef CFI_ARMEDANDDANGEROUS 849 register_t intr; 850 int i, error; 851 sbintime_t start; 852 #endif 853 854 if (sc->sc_cmdset != CFI_VEND_INTEL_ECS) 855 return EOPNOTSUPP; 856 KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width)); 857 858 #ifdef CFI_ARMEDANDDANGEROUS 859 for (i = 7; i >= 4; i--, id >>= 16) { 860 intr = intr_disable(); 861 start = sbinuptime(); 862 cfi_write(sc, 0, CFI_INTEL_PP_SETUP); 863 cfi_put16(sc, CFI_INTEL_PR(i), id&0xffff); 864 intr_restore(intr); 865 error = cfi_wait_ready(sc, CFI_BCS_READ_STATUS, start, 866 CFI_TIMEOUT_WRITE); 867 if (error) 868 break; 869 } 870 cfi_write(sc, 0, CFI_BCS_READ_ARRAY); 871 return error; 872 #else 873 device_printf(sc->sc_dev, "%s: OEM PR not set, " 874 "CFI_ARMEDANDDANGEROUS not configured\n", __func__); 875 return ENXIO; 876 #endif 877 } 878 879 /* 880 * Read the contents of the Protection Lock Register. 881 */ 882 int 883 cfi_intel_get_plr(struct cfi_softc *sc, uint32_t *plr) 884 { 885 if (sc->sc_cmdset != CFI_VEND_INTEL_ECS) 886 return EOPNOTSUPP; 887 KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width)); 888 889 cfi_write(sc, 0, CFI_INTEL_READ_ID); 890 *plr = cfi_get16(sc, CFI_INTEL_PLR); 891 cfi_write(sc, 0, CFI_BCS_READ_ARRAY); 892 return 0; 893 } 894 895 /* 896 * Write the Protection Lock Register to lock down the 897 * user-settable segment of the Protection Register. 898 * NOTE: this operation is not reversible. 899 */ 900 int 901 cfi_intel_set_plr(struct cfi_softc *sc) 902 { 903 #ifdef CFI_ARMEDANDDANGEROUS 904 register_t intr; 905 int error; 906 sbintime_t start; 907 #endif 908 if (sc->sc_cmdset != CFI_VEND_INTEL_ECS) 909 return EOPNOTSUPP; 910 KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width)); 911 912 #ifdef CFI_ARMEDANDDANGEROUS 913 /* worthy of console msg */ 914 device_printf(sc->sc_dev, "set PLR\n"); 915 intr = intr_disable(); 916 binuptime(&start); 917 cfi_write(sc, 0, CFI_INTEL_PP_SETUP); 918 cfi_put16(sc, CFI_INTEL_PLR, 0xFFFD); 919 intr_restore(intr); 920 error = cfi_wait_ready(sc, CFI_BCS_READ_STATUS, start, 921 CFI_TIMEOUT_WRITE); 922 cfi_write(sc, 0, CFI_BCS_READ_ARRAY); 923 return error; 924 #else 925 device_printf(sc->sc_dev, "%s: PLR not set, " 926 "CFI_ARMEDANDDANGEROUS not configured\n", __func__); 927 return ENXIO; 928 #endif 929 } 930 #endif /* CFI_SUPPORT_STRATAFLASH */ 931