/*-
 * Copyright (c) 2007, Juniper Networks, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_cfi.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/sysctl.h>

#include <machine/bus.h>

#include <dev/cfi/cfi_reg.h>
#include <dev/cfi/cfi_var.h>

extern struct cdevsw cfi_cdevsw;

char cfi_driver_name[] = "cfi";
devclass_t cfi_devclass;
devclass_t cfi_diskclass;

/*
 * Read one device-width item from the flash at byte offset 'ofs'
 * (the offset is aligned down to the device width).
 */
uint32_t
cfi_read(struct cfi_softc *sc, u_int ofs)
{
	uint32_t val;

	ofs &= ~(sc->sc_width - 1);
	switch (sc->sc_width) {
	case 1:
		val = bus_space_read_1(sc->sc_tag, sc->sc_handle, ofs);
		break;
	case 2:
		val = bus_space_read_2(sc->sc_tag, sc->sc_handle, ofs);
		break;
	case 4:
		val = bus_space_read_4(sc->sc_tag, sc->sc_handle, ofs);
		break;
	default:
		val = ~0;
		break;
	}
	return (val);
}

/*
 * Write one device-width item to the flash at byte offset 'ofs'.
 */
static void
cfi_write(struct cfi_softc *sc, u_int ofs, u_int val)
{

	ofs &= ~(sc->sc_width - 1);
	switch (sc->sc_width) {
	case 1:
		bus_space_write_1(sc->sc_tag, sc->sc_handle, ofs, val);
		break;
	case 2:
		bus_space_write_2(sc->sc_tag, sc->sc_handle, ofs, val);
		break;
	case 4:
		bus_space_write_4(sc->sc_tag, sc->sc_handle, ofs, val);
		break;
	}
}

/*
 * Enter CFI query mode, read one query byte at 'ofs' and return the
 * device to read-array mode.
 */
uint8_t
cfi_read_qry(struct cfi_softc *sc, u_int ofs)
{
	uint8_t val;

	cfi_write(sc, CFI_QRY_CMD_ADDR * sc->sc_width, CFI_QRY_CMD_DATA);
	val = cfi_read(sc, ofs * sc->sc_width);
	cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
	return (val);
}

/*
 * Issue an AMD-style command, preceded by the two-cycle unlock sequence.
 */
static void
cfi_amd_write(struct cfi_softc *sc, u_int ofs, u_int addr, u_int data)
{

	cfi_write(sc, ofs + AMD_ADDR_START, CFI_AMD_UNLOCK);
	cfi_write(sc, ofs + AMD_ADDR_ACK, CFI_AMD_UNLOCK_ACK);
	cfi_write(sc, ofs + addr, data);
}

/*
 * Format a byte count as a human-readable string (static buffer,
 * not re-entrant).
 */
static char *
cfi_fmtsize(uint32_t sz)
{
	static char buf[8];
	static const char *sfx[] = { "", "K", "M", "G" };
	int sfxidx;

	sfxidx = 0;
	while (sfxidx < 3 && sz > 1023) {
		sz /= 1024;
		sfxidx++;
	}

	sprintf(buf, "%u%sB", sz, sfx[sfxidx]);
	return (buf);
}

int
cfi_probe(device_t dev)
{
	char desc[80];
	struct cfi_softc *sc;
	char *vend_str;
	int error;
	uint16_t iface, vend;

	sc = device_get_softc(dev);
	sc->sc_dev = dev;

	sc->sc_rid = 0;
	sc->sc_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rid,
	    RF_ACTIVE);
	if (sc->sc_res == NULL)
		return (ENXIO);

	sc->sc_tag = rman_get_bustag(sc->sc_res);
	sc->sc_handle = rman_get_bushandle(sc->sc_res);

	if (sc->sc_width == 0) {
		sc->sc_width = 1;
		while (sc->sc_width <= 4) {
			if (cfi_read_qry(sc, CFI_QRY_IDENT) == 'Q')
				break;
			sc->sc_width <<= 1;
		}
	} else if (cfi_read_qry(sc, CFI_QRY_IDENT) != 'Q') {
		error = ENXIO;
		goto out;
	}
	if (sc->sc_width > 4) {
		error = ENXIO;
		goto out;
	}

	/* We got a Q. Check if we also have the R and the Y. */
	if (cfi_read_qry(sc, CFI_QRY_IDENT + 1) != 'R' ||
	    cfi_read_qry(sc, CFI_QRY_IDENT + 2) != 'Y') {
		error = ENXIO;
		goto out;
	}

	/* Get the vendor and command set. */
	vend = cfi_read_qry(sc, CFI_QRY_VEND) |
	    (cfi_read_qry(sc, CFI_QRY_VEND + 1) << 8);

	sc->sc_cmdset = vend;

	switch (vend) {
	case CFI_VEND_AMD_ECS:
	case CFI_VEND_AMD_SCS:
		vend_str = "AMD/Fujitsu";
		break;
	case CFI_VEND_INTEL_ECS:
		vend_str = "Intel/Sharp";
		break;
	case CFI_VEND_INTEL_SCS:
		vend_str = "Intel";
		break;
	case CFI_VEND_MITSUBISHI_ECS:
	case CFI_VEND_MITSUBISHI_SCS:
		vend_str = "Mitsubishi";
		break;
	default:
		vend_str = "Unknown vendor";
		break;
	}

	/* Get the device size. */
	sc->sc_size = 1U << cfi_read_qry(sc, CFI_QRY_SIZE);

	/* Sanity-check the I/F */
	iface = cfi_read_qry(sc, CFI_QRY_IFACE) |
	    (cfi_read_qry(sc, CFI_QRY_IFACE + 1) << 8);

	/*
	 * Adding 1 to iface will give us a bit-wise "switch"
	 * that allows us to test for the interface width by
	 * testing a single bit.
	 */
	iface++;

	error = (iface & sc->sc_width) ? 0 : EINVAL;
	if (error)
		goto out;

	snprintf(desc, sizeof(desc), "%s - %s", vend_str,
	    cfi_fmtsize(sc->sc_size));
	device_set_desc_copy(dev, desc);

out:
	bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rid, sc->sc_res);
	return (error);
}

int
cfi_attach(device_t dev)
{
	struct cfi_softc *sc;
	u_int blksz, blocks;
	u_int r, u;

	sc = device_get_softc(dev);
	sc->sc_dev = dev;

	sc->sc_rid = 0;
	sc->sc_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rid,
	    RF_ACTIVE);
	if (sc->sc_res == NULL)
		return (ENXIO);

	sc->sc_tag = rman_get_bustag(sc->sc_res);
	sc->sc_handle = rman_get_bushandle(sc->sc_res);

	/* Get time-out values for erase and write. */
	sc->sc_write_timeout = 1 << cfi_read_qry(sc, CFI_QRY_TTO_WRITE);
	sc->sc_erase_timeout = 1 << cfi_read_qry(sc, CFI_QRY_TTO_ERASE);
	sc->sc_write_timeout *= 1 << cfi_read_qry(sc, CFI_QRY_MTO_WRITE);
	sc->sc_erase_timeout *= 1 << cfi_read_qry(sc, CFI_QRY_MTO_ERASE);

	/* Get erase regions. */
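	/*
	 * Each erase region entry in the CFI query table is 4 bytes: the
	 * low 16 bits hold the number of erase blocks minus one, and the
	 * high 16 bits hold the block size in 256-byte units, with zero
	 * meaning 128 bytes.
	 */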
	sc->sc_regions = cfi_read_qry(sc, CFI_QRY_NREGIONS);
	sc->sc_region = malloc(sc->sc_regions * sizeof(struct cfi_region),
	    M_TEMP, M_WAITOK | M_ZERO);
	for (r = 0; r < sc->sc_regions; r++) {
		blocks = cfi_read_qry(sc, CFI_QRY_REGION(r)) |
		    (cfi_read_qry(sc, CFI_QRY_REGION(r) + 1) << 8);
		sc->sc_region[r].r_blocks = blocks + 1;

		blksz = cfi_read_qry(sc, CFI_QRY_REGION(r) + 2) |
		    (cfi_read_qry(sc, CFI_QRY_REGION(r) + 3) << 8);
		sc->sc_region[r].r_blksz = (blksz == 0) ? 128 :
		    blksz * 256;
	}

	/* Reset the device to a default state. */
	cfi_write(sc, 0, CFI_BCS_CLEAR_STATUS);

	if (bootverbose) {
		device_printf(dev, "[");
		for (r = 0; r < sc->sc_regions; r++) {
			printf("%ux%s%s", sc->sc_region[r].r_blocks,
			    cfi_fmtsize(sc->sc_region[r].r_blksz),
			    (r == sc->sc_regions - 1) ? "]\n" : ",");
		}
	}

	u = device_get_unit(dev);
	sc->sc_nod = make_dev(&cfi_cdevsw, u, UID_ROOT, GID_WHEEL, 0600,
	    "%s%u", cfi_driver_name, u);
	sc->sc_nod->si_drv1 = sc;

	device_add_child(dev, "cfid",
	    devclass_find_free_unit(cfi_diskclass, 0));
	bus_generic_attach(dev);

	return (0);
}

int
cfi_detach(device_t dev)
{
	struct cfi_softc *sc;

	sc = device_get_softc(dev);

	destroy_dev(sc->sc_nod);
	free(sc->sc_region, M_TEMP);
	bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rid, sc->sc_res);
	return (0);
}

/*
 * Poll the flash status until the pending erase/program operation has
 * finished or roughly 'timeout' milliseconds have elapsed.
 */
static int
cfi_wait_ready(struct cfi_softc *sc, u_int ofs, u_int timeout)
{
	int done, error;
	uint32_t st0 = 0, st = 0;

	done = 0;
	error = 0;
	timeout *= 10;
	while (!done && !error && timeout) {
		DELAY(100);
		timeout--;

		switch (sc->sc_cmdset) {
		case CFI_VEND_INTEL_ECS:
		case CFI_VEND_INTEL_SCS:
			st = cfi_read(sc, ofs);
			done = (st & CFI_INTEL_STATUS_WSMS);
			if (done) {
				/* NB: bit 0 is reserved */
				st &= ~(CFI_INTEL_XSTATUS_RSVD |
				    CFI_INTEL_STATUS_WSMS |
				    CFI_INTEL_STATUS_RSVD);
				if (st & CFI_INTEL_STATUS_DPS)
					error = EPERM;
				else if (st & CFI_INTEL_STATUS_PSLBS)
					error = EIO;
				else if (st & CFI_INTEL_STATUS_ECLBS)
					error = ENXIO;
				else if (st)
					error = EACCES;
			}
			break;
		case CFI_VEND_AMD_SCS:
		case CFI_VEND_AMD_ECS:
			/* DQ6 toggles on successive reads while busy. */
			st0 = cfi_read(sc, ofs);
			st = cfi_read(sc, ofs);
			done = ((st & 0x40) == (st0 & 0x40)) ? 1 : 0;
			break;
		}
	}
	if (!done && !error)
		error = ETIMEDOUT;
	if (error)
		printf("\nerror=%d (st 0x%x st0 0x%x)\n", error, st, st0);
	return (error);
}

int
cfi_write_block(struct cfi_softc *sc)
{
	union {
		uint8_t *x8;
		uint16_t *x16;
		uint32_t *x32;
	} ptr;
	register_t intr;
	int error, i;

	/* Erase the block. */
	switch (sc->sc_cmdset) {
	case CFI_VEND_INTEL_ECS:
	case CFI_VEND_INTEL_SCS:
		cfi_write(sc, sc->sc_wrofs, CFI_BCS_BLOCK_ERASE);
		cfi_write(sc, sc->sc_wrofs, CFI_BCS_CONFIRM);
		break;
	case CFI_VEND_AMD_SCS:
	case CFI_VEND_AMD_ECS:
		cfi_amd_write(sc, sc->sc_wrofs, AMD_ADDR_START,
		    CFI_AMD_ERASE_SECTOR);
		cfi_amd_write(sc, sc->sc_wrofs, 0, CFI_AMD_BLOCK_ERASE);
		break;
	default:
		/* Better safe than sorry... */
		return (ENODEV);
	}
	error = cfi_wait_ready(sc, sc->sc_wrofs, sc->sc_erase_timeout);
	if (error)
		goto out;

	/* Write the block. */
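	/*
	 * Program one device-width item at a time: issue the program
	 * command, write the datum back-to-back, then poll the status
	 * until the operation completes.
	 */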
	ptr.x8 = sc->sc_wrbuf;
	for (i = 0; i < sc->sc_wrbufsz; i += sc->sc_width) {

		/*
		 * Make sure the command to start a write and the
		 * actual write happen back-to-back without any
		 * excessive delays.
		 */
		intr = intr_disable();

		switch (sc->sc_cmdset) {
		case CFI_VEND_INTEL_ECS:
		case CFI_VEND_INTEL_SCS:
			cfi_write(sc, sc->sc_wrofs + i, CFI_BCS_PROGRAM);
			break;
		case CFI_VEND_AMD_SCS:
		case CFI_VEND_AMD_ECS:
			cfi_amd_write(sc, 0, AMD_ADDR_START, CFI_AMD_PROGRAM);
			break;
		}
		switch (sc->sc_width) {
		case 1:
			bus_space_write_1(sc->sc_tag, sc->sc_handle,
			    sc->sc_wrofs + i, *(ptr.x8)++);
			break;
		case 2:
			bus_space_write_2(sc->sc_tag, sc->sc_handle,
			    sc->sc_wrofs + i, *(ptr.x16)++);
			break;
		case 4:
			bus_space_write_4(sc->sc_tag, sc->sc_handle,
			    sc->sc_wrofs + i, *(ptr.x32)++);
			break;
		}

		intr_restore(intr);

		error = cfi_wait_ready(sc, sc->sc_wrofs, sc->sc_write_timeout);
		if (error)
			goto out;
	}

	/* error is 0. */

out:
	cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
	return (error);
}

#ifdef CFI_SUPPORT_STRATAFLASH
/*
 * Intel StrataFlash Protection Register Support.
 *
 * The memory includes a 128-bit Protection Register that can be
 * used for security. There are two 64-bit segments; one is programmed
 * at the factory with a unique 64-bit number which is immutable.
 * The other segment is left blank for User (OEM) programming.
 * The User/OEM segment is One Time Programmable (OTP). It can also
 * be locked to prevent any further writes by setting bit 0 of the
 * Protection Lock Register (PLR). The PLR can be written only once.
 */

static uint16_t
cfi_get16(struct cfi_softc *sc, int off)
{
	uint16_t v = bus_space_read_2(sc->sc_tag, sc->sc_handle, off << 1);
	return v;
}

#ifdef CFI_ARMEDANDDANGEROUS
static void
cfi_put16(struct cfi_softc *sc, int off, uint16_t v)
{
	bus_space_write_2(sc->sc_tag, sc->sc_handle, off << 1, v);
}
#endif

/*
 * Read the factory-defined 64-bit segment of the PR.
 */
int
cfi_intel_get_factory_pr(struct cfi_softc *sc, uint64_t *id)
{
	if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
		return EOPNOTSUPP;
	KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));

	cfi_write(sc, 0, CFI_INTEL_READ_ID);
	*id = ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(0))) << 48 |
	    ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(1))) << 32 |
	    ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(2))) << 16 |
	    ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(3)));
	cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
	return 0;
}

/*
 * Read the User/OEM 64-bit segment of the PR.
 */
int
cfi_intel_get_oem_pr(struct cfi_softc *sc, uint64_t *id)
{
	if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
		return EOPNOTSUPP;
	KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));

	cfi_write(sc, 0, CFI_INTEL_READ_ID);
	*id = ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(4))) << 48 |
	    ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(5))) << 32 |
	    ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(6))) << 16 |
	    ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(7)));
	cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
	return 0;
}

/*
 * Write the User/OEM 64-bit segment of the PR.
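 * The segment spans words PR(4)..PR(7); it is programmed one 16-bit
 * word at a time, least-significant word (PR(7)) first.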
 * XXX should allow writing individual words/bytes
 */
int
cfi_intel_set_oem_pr(struct cfi_softc *sc, uint64_t id)
{
#ifdef CFI_ARMEDANDDANGEROUS
	register_t intr;
	int i, error;
#endif

	if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
		return EOPNOTSUPP;
	KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));

#ifdef CFI_ARMEDANDDANGEROUS
	for (i = 7; i >= 4; i--, id >>= 16) {
		intr = intr_disable();
		cfi_write(sc, 0, CFI_INTEL_PP_SETUP);
		cfi_put16(sc, CFI_INTEL_PR(i), id & 0xffff);
		intr_restore(intr);
		error = cfi_wait_ready(sc, CFI_BCS_READ_STATUS,
		    sc->sc_write_timeout);
		if (error)
			break;
	}
	cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
	return error;
#else
	device_printf(sc->sc_dev, "%s: OEM PR not set, "
	    "CFI_ARMEDANDDANGEROUS not configured\n", __func__);
	return ENXIO;
#endif
}

/*
 * Read the contents of the Protection Lock Register.
 */
int
cfi_intel_get_plr(struct cfi_softc *sc, uint32_t *plr)
{
	if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
		return EOPNOTSUPP;
	KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));

	cfi_write(sc, 0, CFI_INTEL_READ_ID);
	*plr = cfi_get16(sc, CFI_INTEL_PLR);
	cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
	return 0;
}

/*
 * Write the Protection Lock Register to lock down the
 * user-settable segment of the Protection Register.
 * NOTE: this operation is not reversible.
 */
int
cfi_intel_set_plr(struct cfi_softc *sc)
{
#ifdef CFI_ARMEDANDDANGEROUS
	register_t intr;
	int error;
#endif
	if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
		return EOPNOTSUPP;
	KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));

#ifdef CFI_ARMEDANDDANGEROUS
	/* worthy of console msg */
	device_printf(sc->sc_dev, "set PLR\n");
	intr = intr_disable();
	cfi_write(sc, 0, CFI_INTEL_PP_SETUP);
	cfi_put16(sc, CFI_INTEL_PLR, 0xFFFD);
	intr_restore(intr);
	error = cfi_wait_ready(sc, CFI_BCS_READ_STATUS, sc->sc_write_timeout);
	cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
	return error;
#else
	device_printf(sc->sc_dev, "%s: PLR not set, "
	    "CFI_ARMEDANDDANGEROUS not configured\n", __func__);
	return ENXIO;
#endif
}
#endif /* CFI_SUPPORT_STRATAFLASH */