xref: /freebsd/sys/dev/cfi/cfi_core.c (revision 86dc8398c9ca2283c5d6984992b7a585257b5adb)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 2007, Juniper Networks, Inc.
5  * Copyright (c) 2012-2013, SRI International
6  * All rights reserved.
7  *
8  * Portions of this software were developed by SRI International and the
9  * University of Cambridge Computer Laboratory under DARPA/AFRL contract
10  * (FA8750-10-C-0237) ("CTSRD"), as part of the DARPA CRASH research
11  * programme.
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  * 1. Redistributions of source code must retain the above copyright
17  *    notice, this list of conditions and the following disclaimer.
18  * 2. Redistributions in binary form must reproduce the above copyright
19  *    notice, this list of conditions and the following disclaimer in the
20  *    documentation and/or other materials provided with the distribution.
21  * 3. Neither the name of the author nor the names of any co-contributors
22  *    may be used to endorse or promote products derived from this software
23  *    without specific prior written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
26  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
27  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
28  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
29  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
30  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
31  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
32  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
33  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35  * SUCH DAMAGE.
36  */
37 
38 #include <sys/cdefs.h>
39 __FBSDID("$FreeBSD$");
40 
41 #include "opt_cfi.h"
42 
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/bus.h>
46 #include <sys/conf.h>
47 #include <sys/endian.h>
48 #include <sys/kenv.h>
49 #include <sys/kernel.h>
50 #include <sys/malloc.h>
51 #include <sys/module.h>
52 #include <sys/rman.h>
53 #include <sys/sysctl.h>
54 
55 #include <machine/bus.h>
56 
57 #include <dev/cfi/cfi_reg.h>
58 #include <dev/cfi/cfi_var.h>
59 
60 static void cfi_add_sysctls(struct cfi_softc *);
61 
62 extern struct cdevsw cfi_cdevsw;
63 
64 char cfi_driver_name[] = "cfi";
65 devclass_t cfi_diskclass;
66 
67 uint32_t
68 cfi_read_raw(struct cfi_softc *sc, u_int ofs)
69 {
70 	uint32_t val;
71 
72 	ofs &= ~(sc->sc_width - 1);
73 	switch (sc->sc_width) {
74 	case 1:
75 		val = bus_space_read_1(sc->sc_tag, sc->sc_handle, ofs);
76 		break;
77 	case 2:
78 		val = bus_space_read_2(sc->sc_tag, sc->sc_handle, ofs);
79 		break;
80 	case 4:
81 		val = bus_space_read_4(sc->sc_tag, sc->sc_handle, ofs);
82 		break;
83 	default:
84 		val = ~0;
85 		break;
86 	}
87 	return (val);
88 }
89 
90 uint32_t
91 cfi_read(struct cfi_softc *sc, u_int ofs)
92 {
93 	uint32_t val;
94 	uint16_t sval;
95 
96 	ofs &= ~(sc->sc_width - 1);
97 	switch (sc->sc_width) {
98 	case 1:
99 		val = bus_space_read_1(sc->sc_tag, sc->sc_handle, ofs);
100 		break;
101 	case 2:
102 		sval = bus_space_read_2(sc->sc_tag, sc->sc_handle, ofs);
103 #ifdef CFI_HARDWAREBYTESWAP
104 		val = sval;
105 #else
106 		val = le16toh(sval);
107 #endif
108 		break;
109 	case 4:
110 		val = bus_space_read_4(sc->sc_tag, sc->sc_handle, ofs);
111 #ifndef CFI_HARDWAREBYTESWAP
112 		val = le32toh(val);
113 #endif
114 		break;
115 	default:
116 		val = ~0;
117 		break;
118 	}
119 	return (val);
120 }
121 
122 static void
123 cfi_write(struct cfi_softc *sc, u_int ofs, u_int val)
124 {
125 
126 	ofs &= ~(sc->sc_width - 1);
127 	switch (sc->sc_width) {
128 	case 1:
129 		bus_space_write_1(sc->sc_tag, sc->sc_handle, ofs, val);
130 		break;
131 	case 2:
132 #ifdef CFI_HARDWAREBYTESWAP
133 		bus_space_write_2(sc->sc_tag, sc->sc_handle, ofs, val);
134 #else
135 		bus_space_write_2(sc->sc_tag, sc->sc_handle, ofs, htole16(val));
136 
137 #endif
138 		break;
139 	case 4:
140 #ifdef CFI_HARDWAREBYTESWAP
141 		bus_space_write_4(sc->sc_tag, sc->sc_handle, ofs, val);
142 #else
143 		bus_space_write_4(sc->sc_tag, sc->sc_handle, ofs, htole32(val));
144 #endif
145 		break;
146 	}
147 }
148 
/*
 * Return the chip to normal array-read mode.  Both the read-array
 * command variants are issued so the sequence takes effect regardless
 * of which command set the part implements.  This is the same
 * workaround as NetBSD's sys/dev/nor/cfi.c cfi_reset_default().
 */
static void
cfi_reset_default(struct cfi_softc *sc)
{

	cfi_write(sc, 0, CFI_BCS_READ_ARRAY2);
	cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
}
159 
160 uint8_t
161 cfi_read_qry(struct cfi_softc *sc, u_int ofs)
162 {
163 	uint8_t val;
164 
165 	cfi_write(sc, CFI_QRY_CMD_ADDR * sc->sc_width, CFI_QRY_CMD_DATA);
166 	val = cfi_read(sc, ofs * sc->sc_width);
167 	cfi_reset_default(sc);
168 	return (val);
169 }
170 
/*
 * Issue one AMD command-set cycle: write the two-cycle unlock sequence
 * and then the command byte 'data' at 'ofs + addr'.  The order of the
 * three writes is mandated by the AMD command protocol.
 */
static void
cfi_amd_write(struct cfi_softc *sc, u_int ofs, u_int addr, u_int data)
{

	cfi_write(sc, ofs + AMD_ADDR_START, CFI_AMD_UNLOCK);
	cfi_write(sc, ofs + AMD_ADDR_ACK, CFI_AMD_UNLOCK_ACK);
	cfi_write(sc, ofs + addr, data);
}
179 
/*
 * Format a byte count into a short human-readable string ("512B",
 * "1KB", "128MB", ...), scaling by powers of 1024 up to "G".
 *
 * Returns a pointer to a static buffer, so the result is only valid
 * until the next call and the function is not reentrant.
 */
static char *
cfi_fmtsize(uint32_t sz)
{
	static char buf[8];
	static const char *sfx[] = { "", "K", "M", "G" };
	int sfxidx;

	sfxidx = 0;
	while (sfxidx < 3 && sz > 1023) {
		sz /= 1024;
		sfxidx++;
	}

	/*
	 * Bound the write: the current math cannot overflow buf, but
	 * snprintf keeps that true if the scaling above ever changes.
	 */
	snprintf(buf, sizeof(buf), "%u%sB", sz, sfx[sfxidx]);
	return (buf);
}
196 
/*
 * Probe for a CFI-compliant flash device: discover the port width by
 * hunting for the query signature, verify the full "QRY" marker, record
 * the vendor command set and device size, and sanity-check the declared
 * interface width.  The memory resource is released again before return;
 * attach re-acquires it.
 */
int
cfi_probe(device_t dev)
{
	char desc[80];
	struct cfi_softc *sc;
	char *vend_str;
	int error;
	uint16_t iface, vend;

	sc = device_get_softc(dev);
	sc->sc_dev = dev;

	sc->sc_rid = 0;
	sc->sc_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rid,
	    RF_ACTIVE);
	if (sc->sc_res == NULL)
		return (ENXIO);

	sc->sc_tag = rman_get_bustag(sc->sc_res);
	sc->sc_handle = rman_get_bushandle(sc->sc_res);

	/*
	 * If the bus attachment did not preset the port width, probe for
	 * it by trying 1, 2 and 4 bytes until the 'Q' of the query
	 * signature shows up; otherwise just confirm the preset width.
	 */
	if (sc->sc_width == 0) {
		sc->sc_width = 1;
		while (sc->sc_width <= 4) {
			if (cfi_read_qry(sc, CFI_QRY_IDENT) == 'Q')
				break;
			sc->sc_width <<= 1;
		}
	} else if (cfi_read_qry(sc, CFI_QRY_IDENT) != 'Q') {
		error = ENXIO;
		goto out;
	}
	if (sc->sc_width > 4) {
		/* No width produced the signature: not a CFI device. */
		error = ENXIO;
		goto out;
	}

	/* We got a Q. Check if we also have the R and the Y. */
	if (cfi_read_qry(sc, CFI_QRY_IDENT + 1) != 'R' ||
	    cfi_read_qry(sc, CFI_QRY_IDENT + 2) != 'Y') {
		error = ENXIO;
		goto out;
	}

	/* Get the vendor and command set. */
	vend = cfi_read_qry(sc, CFI_QRY_VEND) |
	    (cfi_read_qry(sc, CFI_QRY_VEND + 1) << 8);

	sc->sc_cmdset = vend;

	/* Map the command-set ID to a human-readable vendor name. */
	switch (vend) {
	case CFI_VEND_AMD_ECS:
	case CFI_VEND_AMD_SCS:
		vend_str = "AMD/Fujitsu";
		break;
	case CFI_VEND_INTEL_ECS:
		vend_str = "Intel/Sharp";
		break;
	case CFI_VEND_INTEL_SCS:
		vend_str = "Intel";
		break;
	case CFI_VEND_MITSUBISHI_ECS:
	case CFI_VEND_MITSUBISHI_SCS:
		vend_str = "Mitsubishi";
		break;
	default:
		vend_str = "Unknown vendor";
		break;
	}

	/* Get the device size (the query byte holds log2 of the size). */
	sc->sc_size = 1U << cfi_read_qry(sc, CFI_QRY_SIZE);

	/* Sanity-check the I/F */
	iface = cfi_read_qry(sc, CFI_QRY_IFACE) |
	    (cfi_read_qry(sc, CFI_QRY_IFACE + 1) << 8);

	/*
	 * Adding 1 to iface will give us a bit-wise "switch"
	 * that allows us to test for the interface width by
	 * testing a single bit.
	 */
	iface++;

	error = (iface & sc->sc_width) ? 0 : EINVAL;
	if (error)
		goto out;

	snprintf(desc, sizeof(desc), "%s - %s", vend_str,
	    cfi_fmtsize(sc->sc_size));
	device_set_desc_copy(dev, desc);

 out:
	/* Probe must not hold the resource; attach allocates it again. */
	bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rid, sc->sc_res);
	return (error);
}
293 
294 int
295 cfi_attach(device_t dev)
296 {
297 	struct cfi_softc *sc;
298 	u_int blksz, blocks;
299 	u_int r, u;
300 	uint64_t mtoexp, ttoexp;
301 #ifdef CFI_SUPPORT_STRATAFLASH
302 	uint64_t ppr;
303 	char name[KENV_MNAMELEN], value[32];
304 #endif
305 
306 	sc = device_get_softc(dev);
307 	sc->sc_dev = dev;
308 
309 	sc->sc_rid = 0;
310 	sc->sc_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rid,
311 #ifndef ATSE_CFI_HACK
312 	    RF_ACTIVE);
313 #else
314 	    RF_ACTIVE | RF_SHAREABLE);
315 #endif
316 	if (sc->sc_res == NULL)
317 		return (ENXIO);
318 
319 	sc->sc_tag = rman_get_bustag(sc->sc_res);
320 	sc->sc_handle = rman_get_bushandle(sc->sc_res);
321 
322 	/* Get time-out values for erase, write, and buffer write. */
323 	ttoexp = cfi_read_qry(sc, CFI_QRY_TTO_ERASE);
324 	mtoexp = cfi_read_qry(sc, CFI_QRY_MTO_ERASE);
325 	if (ttoexp == 0) {
326 		device_printf(dev, "erase timeout == 0, using 2^16ms\n");
327 		ttoexp = 16;
328 	}
329 	if (ttoexp > 41) {
330 		device_printf(dev, "insane timeout: 2^%jdms\n", ttoexp);
331 		return (EINVAL);
332 	}
333 	if (mtoexp == 0) {
334 		device_printf(dev, "max erase timeout == 0, using 2^%jdms\n",
335 		    ttoexp + 4);
336 		mtoexp = 4;
337 	}
338 	if (ttoexp + mtoexp > 41) {
339 		device_printf(dev, "insane max erase timeout: 2^%jd\n",
340 		    ttoexp + mtoexp);
341 		return (EINVAL);
342 	}
343 	sc->sc_typical_timeouts[CFI_TIMEOUT_ERASE] = SBT_1MS * (1ULL << ttoexp);
344 	sc->sc_max_timeouts[CFI_TIMEOUT_ERASE] =
345 	    sc->sc_typical_timeouts[CFI_TIMEOUT_ERASE] * (1ULL << mtoexp);
346 
347 	ttoexp = cfi_read_qry(sc, CFI_QRY_TTO_WRITE);
348 	mtoexp = cfi_read_qry(sc, CFI_QRY_MTO_WRITE);
349 	if (ttoexp == 0) {
350 		device_printf(dev, "write timeout == 0, using 2^18ns\n");
351 		ttoexp = 18;
352 	}
353 	if (ttoexp > 51) {
354 		device_printf(dev, "insane write timeout: 2^%jdus\n", ttoexp);
355 		return (EINVAL);
356 	}
357 	if (mtoexp == 0) {
358 		device_printf(dev, "max write timeout == 0, using 2^%jdms\n",
359 		    ttoexp + 4);
360 		mtoexp = 4;
361 	}
362 	if (ttoexp + mtoexp > 51) {
363 		device_printf(dev, "insane max write timeout: 2^%jdus\n",
364 		    ttoexp + mtoexp);
365 		return (EINVAL);
366 	}
367 	sc->sc_typical_timeouts[CFI_TIMEOUT_WRITE] = SBT_1US * (1ULL << ttoexp);
368 	sc->sc_max_timeouts[CFI_TIMEOUT_WRITE] =
369 	    sc->sc_typical_timeouts[CFI_TIMEOUT_WRITE] * (1ULL << mtoexp);
370 
371 	ttoexp = cfi_read_qry(sc, CFI_QRY_TTO_BUFWRITE);
372 	mtoexp = cfi_read_qry(sc, CFI_QRY_MTO_BUFWRITE);
373 	/* Don't check for 0, it means not-supported. */
374 	if (ttoexp > 51) {
375 		device_printf(dev, "insane write timeout: 2^%jdus\n", ttoexp);
376 		return (EINVAL);
377 	}
378 	if (ttoexp + mtoexp > 51) {
379 		device_printf(dev, "insane max write timeout: 2^%jdus\n",
380 		    ttoexp + mtoexp);
381 		return (EINVAL);
382 	}
383 	sc->sc_typical_timeouts[CFI_TIMEOUT_BUFWRITE] =
384 	    SBT_1US * (1ULL << cfi_read_qry(sc, CFI_QRY_TTO_BUFWRITE));
385 	sc->sc_max_timeouts[CFI_TIMEOUT_BUFWRITE] =
386 	    sc->sc_typical_timeouts[CFI_TIMEOUT_BUFWRITE] *
387 	    (1ULL << cfi_read_qry(sc, CFI_QRY_MTO_BUFWRITE));
388 
389 	/* Get the maximum size of a multibyte program */
390 	if (sc->sc_typical_timeouts[CFI_TIMEOUT_BUFWRITE] != 0)
391 		sc->sc_maxbuf = 1 << (cfi_read_qry(sc, CFI_QRY_MAXBUF) |
392 		    cfi_read_qry(sc, CFI_QRY_MAXBUF) << 8);
393 	else
394 		sc->sc_maxbuf = 0;
395 
396 	/* Get erase regions. */
397 	sc->sc_regions = cfi_read_qry(sc, CFI_QRY_NREGIONS);
398 	sc->sc_region = malloc(sc->sc_regions * sizeof(struct cfi_region),
399 	    M_TEMP, M_WAITOK | M_ZERO);
400 	for (r = 0; r < sc->sc_regions; r++) {
401 		blocks = cfi_read_qry(sc, CFI_QRY_REGION(r)) |
402 		    (cfi_read_qry(sc, CFI_QRY_REGION(r) + 1) << 8);
403 		sc->sc_region[r].r_blocks = blocks + 1;
404 
405 		blksz = cfi_read_qry(sc, CFI_QRY_REGION(r) + 2) |
406 		    (cfi_read_qry(sc, CFI_QRY_REGION(r) + 3) << 8);
407 		sc->sc_region[r].r_blksz = (blksz == 0) ? 128 :
408 		    blksz * 256;
409 	}
410 
411 	/* Reset the device to a default state. */
412 	cfi_write(sc, 0, CFI_BCS_CLEAR_STATUS);
413 
414 	if (bootverbose) {
415 		device_printf(dev, "[");
416 		for (r = 0; r < sc->sc_regions; r++) {
417 			printf("%ux%s%s", sc->sc_region[r].r_blocks,
418 			    cfi_fmtsize(sc->sc_region[r].r_blksz),
419 			    (r == sc->sc_regions - 1) ? "]\n" : ",");
420 		}
421 	}
422 
423 	if (sc->sc_cmdset == CFI_VEND_AMD_ECS  ||
424 	    sc->sc_cmdset == CFI_VEND_AMD_SCS) {
425 		cfi_amd_write(sc, 0, AMD_ADDR_START, CFI_AMD_AUTO_SELECT);
426 		sc->sc_manid = cfi_read(sc, 0);
427 		sc->sc_devid = cfi_read(sc, 2);
428 		device_printf(dev, "Manufacturer ID:%x Device ID:%x\n",
429 		    sc->sc_manid, sc->sc_devid);
430 		cfi_write(sc, 0, CFI_BCS_READ_ARRAY2);
431 	}
432 
433 	u = device_get_unit(dev);
434 	sc->sc_nod = make_dev(&cfi_cdevsw, u, UID_ROOT, GID_WHEEL, 0600,
435 	    "%s%u", cfi_driver_name, u);
436 	sc->sc_nod->si_drv1 = sc;
437 
438 	cfi_add_sysctls(sc);
439 
440 #ifdef CFI_SUPPORT_STRATAFLASH
441 	/*
442 	 * Store the Intel factory PPR in the environment.  In some
443 	 * cases it is the most unique ID on a board.
444 	 */
445 	if (cfi_intel_get_factory_pr(sc, &ppr) == 0) {
446 		if (snprintf(name, sizeof(name), "%s.factory_ppr",
447 		    device_get_nameunit(dev)) < (sizeof(name) - 1) &&
448 		    snprintf(value, sizeof(value), "0x%016jx", ppr) <
449 		    (sizeof(value) - 1))
450 			(void) kern_setenv(name, value);
451 	}
452 #endif
453 
454 	device_add_child(dev, "cfid", -1);
455 	bus_generic_attach(dev);
456 
457 	return (0);
458 }
459 
/*
 * Register per-device sysctl nodes exposing how often the typical and
 * maximum timeouts were exceeded for erase, write and (when the chip
 * supports it) buffered-write operations.
 *
 * NB: the "timout" spelling in the node names is long-standing and is
 * kept for compatibility with existing consumers.
 */
static void
cfi_add_sysctls(struct cfi_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *children;

	ctx = device_get_sysctl_ctx(sc->sc_dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->sc_dev));

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "typical_erase_timout_count",
	    CTLFLAG_RD, &sc->sc_tto_counts[CFI_TIMEOUT_ERASE],
	    0, "Number of times the typical erase timeout was exceeded");
	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "max_erase_timout_count",
	    CTLFLAG_RD, &sc->sc_mto_counts[CFI_TIMEOUT_ERASE], 0,
	    "Number of times the maximum erase timeout was exceeded");
	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "typical_write_timout_count",
	    CTLFLAG_RD, &sc->sc_tto_counts[CFI_TIMEOUT_WRITE], 0,
	    "Number of times the typical write timeout was exceeded");
	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "max_write_timout_count",
	    CTLFLAG_RD, &sc->sc_mto_counts[CFI_TIMEOUT_WRITE], 0,
	    "Number of times the maximum write timeout was exceeded");
	/* sc_maxbuf == 0 means the chip has no buffered-write support. */
	if (sc->sc_maxbuf > 0) {
		SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		    "typical_bufwrite_timout_count",
		    CTLFLAG_RD, &sc->sc_tto_counts[CFI_TIMEOUT_BUFWRITE], 0,
		    "Number of times the typical buffered write timeout was "
		    "exceeded");
		SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		    "max_bufwrite_timout_count",
		    CTLFLAG_RD, &sc->sc_mto_counts[CFI_TIMEOUT_BUFWRITE], 0,
		    "Number of times the maximum buffered write timeout was "
		    "exceeded");
	}
}
498 
499 int
500 cfi_detach(device_t dev)
501 {
502 	struct cfi_softc *sc;
503 
504 	sc = device_get_softc(dev);
505 
506 	destroy_dev(sc->sc_nod);
507 	free(sc->sc_region, M_TEMP);
508 	bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rid, sc->sc_res);
509 	return (0);
510 }
511 
512 static bool
513 cfi_check_erase(struct cfi_softc *sc, u_int ofs, u_int sz)
514 {
515 	bool result;
516 	int i;
517 	uint32_t val;
518 
519 	result = FALSE;
520 	for (i = 0; i < sz; i += sc->sc_width) {
521 		val = cfi_read(sc, ofs + i);
522 		switch (sc->sc_width) {
523 		case 1:
524 			if (val != 0xff)
525 				goto out;
526 			continue;
527 		case 2:
528 			if (val != 0xffff)
529 				goto out;
530 			continue;
531 		case 4:
532 			if (val != 0xffffffff)
533 				goto out;
534 			continue;
535 		}
536 	}
537 	result = TRUE;
538 
539 out:
540 	return (result);
541 }
542 
543 static int
544 cfi_wait_ready(struct cfi_softc *sc, u_int ofs, sbintime_t start,
545     enum cfi_wait_cmd cmd)
546 {
547 	int done, error, tto_exceeded;
548 	uint32_t st0 = 0, st = 0;
549 	sbintime_t now;
550 
551 	done = 0;
552 	error = 0;
553 	tto_exceeded = 0;
554 	while (!done && !error) {
555 		/*
556 		 * Save time before we start so we always do one check
557 		 * after the timeout has expired.
558 		 */
559 		now = sbinuptime();
560 
561 		switch (sc->sc_cmdset) {
562 		case CFI_VEND_INTEL_ECS:
563 		case CFI_VEND_INTEL_SCS:
564 			st = cfi_read(sc, ofs);
565 			done = (st & CFI_INTEL_STATUS_WSMS);
566 			if (done) {
567 				/* NB: bit 0 is reserved */
568 				st &= ~(CFI_INTEL_XSTATUS_RSVD |
569 					CFI_INTEL_STATUS_WSMS |
570 					CFI_INTEL_STATUS_RSVD);
571 				if (st & CFI_INTEL_STATUS_DPS)
572 					error = EPERM;
573 				else if (st & CFI_INTEL_STATUS_PSLBS)
574 					error = EIO;
575 				else if (st & CFI_INTEL_STATUS_ECLBS)
576 					error = ENXIO;
577 				else if (st)
578 					error = EACCES;
579 			}
580 			break;
581 		case CFI_VEND_AMD_SCS:
582 		case CFI_VEND_AMD_ECS:
583 			st0 = cfi_read(sc, ofs);
584 			st = cfi_read(sc, ofs);
585 			done = ((st & 0x40) == (st0 & 0x40)) ? 1 : 0;
586 			break;
587 		}
588 
589 		if (tto_exceeded ||
590 		    now > start + sc->sc_typical_timeouts[cmd]) {
591 			if (!tto_exceeded) {
592 				tto_exceeded = 1;
593 				sc->sc_tto_counts[cmd]++;
594 #ifdef CFI_DEBUG_TIMEOUT
595 				device_printf(sc->sc_dev,
596 				    "typical timeout exceeded (cmd %d)", cmd);
597 #endif
598 			}
599 			if (now > start + sc->sc_max_timeouts[cmd]) {
600 				sc->sc_mto_counts[cmd]++;
601 #ifdef CFI_DEBUG_TIMEOUT
602 				device_printf(sc->sc_dev,
603 				    "max timeout exceeded (cmd %d)", cmd);
604 #endif
605 			}
606 		}
607 	}
608 	if (!done && !error)
609 		error = ETIMEDOUT;
610 	if (error)
611 		printf("\nerror=%d (st 0x%x st0 0x%x)\n", error, st, st0);
612 	return (error);
613 }
614 
615 int
616 cfi_write_block(struct cfi_softc *sc)
617 {
618 	union {
619 		uint8_t		*x8;
620 		uint16_t	*x16;
621 		uint32_t	*x32;
622 	} ptr, cpyprt;
623 	register_t intr;
624 	int error, i, j, neederase = 0;
625 	uint32_t st;
626 	u_int wlen;
627 	sbintime_t start;
628 	u_int minsz;
629 	uint32_t val;
630 
631 	/* Intel flash must be unlocked before modification */
632 	switch (sc->sc_cmdset) {
633 	case CFI_VEND_INTEL_ECS:
634 	case CFI_VEND_INTEL_SCS:
635 		cfi_write(sc, sc->sc_wrofs, CFI_INTEL_LBS);
636 		cfi_write(sc, sc->sc_wrofs, CFI_INTEL_UB);
637 		cfi_write(sc, sc->sc_wrofs, CFI_BCS_READ_ARRAY);
638 		break;
639 	}
640 
641 	/* Check if an erase is required. */
642 	for (i = 0; i < sc->sc_wrbufsz; i++)
643 		if ((sc->sc_wrbuf[i] & sc->sc_wrbufcpy[i]) != sc->sc_wrbuf[i]) {
644 			neederase = 1;
645 			break;
646 		}
647 
648 	if (neederase) {
649 		intr = intr_disable();
650 		start = sbinuptime();
651 		/* Erase the block. */
652 		switch (sc->sc_cmdset) {
653 		case CFI_VEND_INTEL_ECS:
654 		case CFI_VEND_INTEL_SCS:
655 			cfi_write(sc, sc->sc_wrofs, CFI_BCS_BLOCK_ERASE);
656 			cfi_write(sc, sc->sc_wrofs, CFI_BCS_CONFIRM);
657 			break;
658 		case CFI_VEND_AMD_SCS:
659 		case CFI_VEND_AMD_ECS:
660 			/* find minimum sector size */
661 			minsz = sc->sc_region[0].r_blksz;
662 			for (i = 1; i < sc->sc_regions; i++) {
663 				if (sc->sc_region[i].r_blksz < minsz)
664 					minsz = sc->sc_region[i].r_blksz;
665 			}
666 			cfi_amd_write(sc, sc->sc_wrofs, AMD_ADDR_START,
667 			    CFI_AMD_ERASE_SECTOR);
668 			cfi_amd_write(sc, sc->sc_wrofs,
669 			    sc->sc_wrofs >> (ffs(minsz) - 1),
670 			    CFI_AMD_BLOCK_ERASE);
671 			for (i = 0; i < CFI_AMD_MAXCHK; ++i) {
672 				if (cfi_check_erase(sc, sc->sc_wrofs,
673 				    sc->sc_wrbufsz))
674 					break;
675 				DELAY(10);
676 			}
677 			if (i == CFI_AMD_MAXCHK) {
678 				printf("\nCFI Sector Erase time out error\n");
679 				return (ENODEV);
680 			}
681 			break;
682 		default:
683 			/* Better safe than sorry... */
684 			intr_restore(intr);
685 			return (ENODEV);
686 		}
687 		intr_restore(intr);
688 		error = cfi_wait_ready(sc, sc->sc_wrofs, start,
689 		    CFI_TIMEOUT_ERASE);
690 		if (error)
691 			goto out;
692 	} else
693 		error = 0;
694 
695 	/* Write the block using a multibyte write if supported. */
696 	ptr.x8 = sc->sc_wrbuf;
697 	cpyprt.x8 = sc->sc_wrbufcpy;
698 	if (sc->sc_maxbuf > sc->sc_width) {
699 		switch (sc->sc_cmdset) {
700 		case CFI_VEND_INTEL_ECS:
701 		case CFI_VEND_INTEL_SCS:
702 			for (i = 0; i < sc->sc_wrbufsz; i += wlen) {
703 				wlen = MIN(sc->sc_maxbuf, sc->sc_wrbufsz - i);
704 
705 				intr = intr_disable();
706 
707 				start = sbinuptime();
708 				do {
709 					cfi_write(sc, sc->sc_wrofs + i,
710 					    CFI_BCS_BUF_PROG_SETUP);
711 					if (sbinuptime() > start + sc->sc_max_timeouts[CFI_TIMEOUT_BUFWRITE]) {
712 						error = ETIMEDOUT;
713 						goto out;
714 					}
715 					st = cfi_read(sc, sc->sc_wrofs + i);
716 				} while (! (st & CFI_INTEL_STATUS_WSMS));
717 
718 				cfi_write(sc, sc->sc_wrofs + i,
719 				    (wlen / sc->sc_width) - 1);
720 				switch (sc->sc_width) {
721 				case 1:
722 					bus_space_write_region_1(sc->sc_tag,
723 					    sc->sc_handle, sc->sc_wrofs + i,
724 					    ptr.x8 + i, wlen);
725 					break;
726 				case 2:
727 					bus_space_write_region_2(sc->sc_tag,
728 					    sc->sc_handle, sc->sc_wrofs + i,
729 					    ptr.x16 + i / 2, wlen / 2);
730 					break;
731 				case 4:
732 					bus_space_write_region_4(sc->sc_tag,
733 					    sc->sc_handle, sc->sc_wrofs + i,
734 					    ptr.x32 + i / 4, wlen / 4);
735 					break;
736 				}
737 
738 				cfi_write(sc, sc->sc_wrofs + i,
739 				    CFI_BCS_CONFIRM);
740 
741 				intr_restore(intr);
742 
743 				error = cfi_wait_ready(sc, sc->sc_wrofs + i,
744 				    start, CFI_TIMEOUT_BUFWRITE);
745 				if (error != 0)
746 					goto out;
747 			}
748 			goto out;
749 		default:
750 			/* Fall through to single word case */
751 			break;
752 		}
753 	}
754 
755 	/* Write the block one byte/word at a time. */
756 	for (i = 0; i < sc->sc_wrbufsz; i += sc->sc_width) {
757 		/* Avoid writing unless we are actually changing bits */
758 		if (!neederase) {
759 			switch (sc->sc_width) {
760 			case 1:
761 				if(*(ptr.x8 + i) == *(cpyprt.x8 + i))
762 					continue;
763 				break;
764 			case 2:
765 				if(*(ptr.x16 + i / 2) == *(cpyprt.x16 + i / 2))
766 					continue;
767 				break;
768 			case 4:
769 				if(*(ptr.x32 + i / 4) == *(cpyprt.x32 + i / 4))
770 					continue;
771 				break;
772 			}
773 		}
774 
775 		/*
776 		 * Make sure the command to start a write and the
777 		 * actual write happens back-to-back without any
778 		 * excessive delays.
779 		 */
780 		intr = intr_disable();
781 
782 		start = sbinuptime();
783 		switch (sc->sc_cmdset) {
784 		case CFI_VEND_INTEL_ECS:
785 		case CFI_VEND_INTEL_SCS:
786 			cfi_write(sc, sc->sc_wrofs + i, CFI_BCS_PROGRAM);
787 			break;
788 		case CFI_VEND_AMD_SCS:
789 		case CFI_VEND_AMD_ECS:
790 			cfi_amd_write(sc, 0, AMD_ADDR_START, CFI_AMD_PROGRAM);
791 			break;
792 		}
793 		switch (sc->sc_width) {
794 		case 1:
795 			bus_space_write_1(sc->sc_tag, sc->sc_handle,
796 			    sc->sc_wrofs + i, *(ptr.x8 + i));
797 			break;
798 		case 2:
799 			bus_space_write_2(sc->sc_tag, sc->sc_handle,
800 			    sc->sc_wrofs + i, *(ptr.x16 + i / 2));
801 			break;
802 		case 4:
803 			bus_space_write_4(sc->sc_tag, sc->sc_handle,
804 			    sc->sc_wrofs + i, *(ptr.x32 + i / 4));
805 			break;
806 		}
807 
808 		intr_restore(intr);
809 
810 		if (sc->sc_cmdset == CFI_VEND_AMD_ECS  ||
811 		    sc->sc_cmdset == CFI_VEND_AMD_SCS) {
812 			for (j = 0; j < CFI_AMD_MAXCHK; ++j) {
813 				switch (sc->sc_width) {
814 				case 1:
815 					val = *(ptr.x8 + i);
816 					break;
817 				case 2:
818 					val = *(ptr.x16 + i / 2);
819 					break;
820 				case 4:
821 					val = *(ptr.x32 + i / 4);
822 					break;
823 				}
824 
825 				if (cfi_read(sc, sc->sc_wrofs + i) == val)
826 					break;
827 
828 				DELAY(10);
829 			}
830 			if (j == CFI_AMD_MAXCHK) {
831 				printf("\nCFI Program Verify time out error\n");
832 				error = ENXIO;
833 				goto out;
834 			}
835 		} else {
836 			error = cfi_wait_ready(sc, sc->sc_wrofs, start,
837 			   CFI_TIMEOUT_WRITE);
838 			if (error)
839 				goto out;
840 		}
841 	}
842 
843 	/* error is 0. */
844 
845  out:
846 	cfi_reset_default(sc);
847 
848 	/* Relock Intel flash */
849 	switch (sc->sc_cmdset) {
850 	case CFI_VEND_INTEL_ECS:
851 	case CFI_VEND_INTEL_SCS:
852 		cfi_write(sc, sc->sc_wrofs, CFI_INTEL_LBS);
853 		cfi_write(sc, sc->sc_wrofs, CFI_INTEL_LB);
854 		cfi_write(sc, sc->sc_wrofs, CFI_BCS_READ_ARRAY);
855 		break;
856 	}
857 	return (error);
858 }
859 
860 #ifdef CFI_SUPPORT_STRATAFLASH
861 /*
862  * Intel StrataFlash Protection Register Support.
863  *
864  * The memory includes a 128-bit Protection Register that can be
865  * used for security.  There are two 64-bit segments; one is programmed
866  * at the factory with a unique 64-bit number which is immutable.
867  * The other segment is left blank for User (OEM) programming.
868  * The User/OEM segment is One Time Programmable (OTP).  It can also
869  * be locked to prevent any further writes by setting bit 0 of the
870  * Protection Lock Register (PLR).  The PLR can written only once.
871  */
872 
873 static uint16_t
874 cfi_get16(struct cfi_softc *sc, int off)
875 {
876 	uint16_t v = bus_space_read_2(sc->sc_tag, sc->sc_handle, off<<1);
877 	return v;
878 }
879 
#ifdef CFI_ARMEDANDDANGEROUS
/*
 * Store one 16-bit word at word offset 'off' (byte address off * 2).
 * Only compiled in when OTP programming support is enabled.
 */
static void
cfi_put16(struct cfi_softc *sc, int off, uint16_t v)
{
	bus_space_write_2(sc->sc_tag, sc->sc_handle, off<<1, v);
}
#endif
887 
888 /*
889  * Read the factory-defined 64-bit segment of the PR.
890  */
891 int
892 cfi_intel_get_factory_pr(struct cfi_softc *sc, uint64_t *id)
893 {
894 	if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
895 		return EOPNOTSUPP;
896 	KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));
897 
898 	cfi_write(sc, 0, CFI_INTEL_READ_ID);
899 	*id = ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(0)))<<48 |
900 	      ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(1)))<<32 |
901 	      ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(2)))<<16 |
902 	      ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(3)));
903 	cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
904 	return 0;
905 }
906 
907 /*
908  * Read the User/OEM 64-bit segment of the PR.
909  */
910 int
911 cfi_intel_get_oem_pr(struct cfi_softc *sc, uint64_t *id)
912 {
913 	if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
914 		return EOPNOTSUPP;
915 	KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));
916 
917 	cfi_write(sc, 0, CFI_INTEL_READ_ID);
918 	*id = ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(4)))<<48 |
919 	      ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(5)))<<32 |
920 	      ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(6)))<<16 |
921 	      ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(7)));
922 	cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
923 	return 0;
924 }
925 
/*
 * Write the User/OEM 64-bit segment of the PR, one 16-bit word at a
 * time from most-significant to least-significant.  The segment is
 * one-time-programmable.  Gated behind CFI_ARMEDANDDANGEROUS since a
 * mistaken write is irreversible.
 * XXX should allow writing individual words/bytes
 */
int
cfi_intel_set_oem_pr(struct cfi_softc *sc, uint64_t id)
{
#ifdef CFI_ARMEDANDDANGEROUS
	register_t intr;
	int i, error;
	sbintime_t start;
#endif

	if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
		return EOPNOTSUPP;
	KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));

#ifdef CFI_ARMEDANDDANGEROUS
	/* Program PR words 7..4, consuming 'id' low word first. */
	for (i = 7; i >= 4; i--, id >>= 16) {
		intr = intr_disable();
		start = sbinuptime();
		cfi_write(sc, 0, CFI_INTEL_PP_SETUP);
		cfi_put16(sc, CFI_INTEL_PR(i), id&0xffff);
		intr_restore(intr);
		error = cfi_wait_ready(sc, CFI_BCS_READ_STATUS, start,
		    CFI_TIMEOUT_WRITE);
		if (error)
			break;
	}
	cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
	return error;
#else
	device_printf(sc->sc_dev, "%s: OEM PR not set, "
	    "CFI_ARMEDANDDANGEROUS not configured\n", __func__);
	return ENXIO;
#endif
}
963 
/*
 * Read the contents of the Protection Lock Register into *plr.
 * Only supported on the Intel extended command set.
 */
int
cfi_intel_get_plr(struct cfi_softc *sc, uint32_t *plr)
{
	if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
		return EOPNOTSUPP;
	KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));

	/* Enter ID-read mode, fetch the PLR, return to array mode. */
	cfi_write(sc, 0, CFI_INTEL_READ_ID);
	*plr = cfi_get16(sc, CFI_INTEL_PLR);
	cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
	return 0;
}
979 
980 /*
981  * Write the Protection Lock Register to lock down the
982  * user-settable segment of the Protection Register.
983  * NOTE: this operation is not reversible.
984  */
985 int
986 cfi_intel_set_plr(struct cfi_softc *sc)
987 {
988 #ifdef CFI_ARMEDANDDANGEROUS
989 	register_t intr;
990 	int error;
991 	sbintime_t start;
992 #endif
993 	if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
994 		return EOPNOTSUPP;
995 	KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));
996 
997 #ifdef CFI_ARMEDANDDANGEROUS
998 	/* worthy of console msg */
999 	device_printf(sc->sc_dev, "set PLR\n");
1000 	intr = intr_disable();
1001 	binuptime(&start);
1002 	cfi_write(sc, 0, CFI_INTEL_PP_SETUP);
1003 	cfi_put16(sc, CFI_INTEL_PLR, 0xFFFD);
1004 	intr_restore(intr);
1005 	error = cfi_wait_ready(sc, CFI_BCS_READ_STATUS, start,
1006 	    CFI_TIMEOUT_WRITE);
1007 	cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
1008 	return error;
1009 #else
1010 	device_printf(sc->sc_dev, "%s: PLR not set, "
1011 	    "CFI_ARMEDANDDANGEROUS not configured\n", __func__);
1012 	return ENXIO;
1013 #endif
1014 }
1015 #endif /* CFI_SUPPORT_STRATAFLASH */
1016