xref: /freebsd/sys/dev/cfi/cfi_core.c (revision febdb468801f35e51c6c5c22221cfce9197c6f3b)
1 /*-
2  * Copyright (c) 2007, Juniper Networks, Inc.
3  * Copyright (c) 2012-2013, SRI International
4  * All rights reserved.
5  *
6  * Portions of this software were developed by SRI International and the
7  * University of Cambridge Computer Laboratory under DARPA/AFRL contract
8  * (FA8750-10-C-0237) ("CTSRD"), as part of the DARPA CRASH research
9  * programme.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. Neither the name of the author nor the names of any co-contributors
20  *    may be used to endorse or promote products derived from this software
21  *    without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
28  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
30  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
31  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  */
35 
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
38 
39 #include "opt_cfi.h"
40 
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/bus.h>
44 #include <sys/conf.h>
45 #include <sys/endian.h>
46 #include <sys/kenv.h>
47 #include <sys/kernel.h>
48 #include <sys/malloc.h>
49 #include <sys/module.h>
50 #include <sys/rman.h>
51 #include <sys/sysctl.h>
52 
53 #include <machine/bus.h>
54 
55 #include <dev/cfi/cfi_reg.h>
56 #include <dev/cfi/cfi_var.h>
57 
58 static void cfi_add_sysctls(struct cfi_softc *);
59 
60 extern struct cdevsw cfi_cdevsw;
61 
62 char cfi_driver_name[] = "cfi";
63 devclass_t cfi_devclass;
64 devclass_t cfi_diskclass;
65 
66 uint32_t
67 cfi_read_raw(struct cfi_softc *sc, u_int ofs)
68 {
69 	uint32_t val;
70 
71 	ofs &= ~(sc->sc_width - 1);
72 	switch (sc->sc_width) {
73 	case 1:
74 		val = bus_space_read_1(sc->sc_tag, sc->sc_handle, ofs);
75 		break;
76 	case 2:
77 		val = bus_space_read_2(sc->sc_tag, sc->sc_handle, ofs);
78 		break;
79 	case 4:
80 		val = bus_space_read_4(sc->sc_tag, sc->sc_handle, ofs);
81 		break;
82 	default:
83 		val = ~0;
84 		break;
85 	}
86 	return (val);
87 }
88 
89 uint32_t
90 cfi_read(struct cfi_softc *sc, u_int ofs)
91 {
92 	uint32_t val;
93 	uint16_t sval;
94 
95 	ofs &= ~(sc->sc_width - 1);
96 	switch (sc->sc_width) {
97 	case 1:
98 		val = bus_space_read_1(sc->sc_tag, sc->sc_handle, ofs);
99 		break;
100 	case 2:
101 		sval = bus_space_read_2(sc->sc_tag, sc->sc_handle, ofs);
102 		val = le16toh(sval);
103 		break;
104 	case 4:
105 		val = bus_space_read_4(sc->sc_tag, sc->sc_handle, ofs);
106 		val = le32toh(val);
107 		break;
108 	default:
109 		val = ~0;
110 		break;
111 	}
112 	return (val);
113 }
114 
115 static void
116 cfi_write(struct cfi_softc *sc, u_int ofs, u_int val)
117 {
118 
119 	ofs &= ~(sc->sc_width - 1);
120 	switch (sc->sc_width) {
121 	case 1:
122 		bus_space_write_1(sc->sc_tag, sc->sc_handle, ofs, val);
123 		break;
124 	case 2:
125 		bus_space_write_2(sc->sc_tag, sc->sc_handle, ofs, htole16(val));
126 		break;
127 	case 4:
128 		bus_space_write_4(sc->sc_tag, sc->sc_handle, ofs, htole32(val));
129 		break;
130 	}
131 }
132 
133 uint8_t
134 cfi_read_qry(struct cfi_softc *sc, u_int ofs)
135 {
136 	uint8_t val;
137 
138 	cfi_write(sc, CFI_QRY_CMD_ADDR * sc->sc_width, CFI_QRY_CMD_DATA);
139 	val = cfi_read(sc, ofs * sc->sc_width);
140 	cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
141 	return (val);
142 }
143 
/*
 * Issue an AMD-style flash command: the mandatory two-cycle unlock
 * sequence followed by the command write itself.  'ofs' is the base
 * (e.g. sector) offset the unlock/command addresses are relative to.
 * The three writes must reach the part in exactly this order.
 */
static void
cfi_amd_write(struct cfi_softc *sc, u_int ofs, u_int addr, u_int data)
{

	cfi_write(sc, ofs + AMD_ADDR_START, CFI_AMD_UNLOCK);
	cfi_write(sc, ofs + AMD_ADDR_ACK, CFI_AMD_UNLOCK_ACK);
	cfi_write(sc, ofs + addr, data);
}
152 
/*
 * Format a byte count as a short human-readable string ("512B", "1KB",
 * "16MB", ...), truncating rather than rounding.  Returns a pointer to
 * a static buffer, so the result must be consumed before the next call
 * and the function is not safe for concurrent callers.
 */
static char *
cfi_fmtsize(uint32_t sz)
{
	static char buf[8];
	static const char *sfx[] = { "", "K", "M", "G" };
	int sfxidx;

	sfxidx = 0;
	while (sfxidx < 3 && sz > 1023) {
		sz /= 1024;
		sfxidx++;
	}

	/* snprintf: bound the write to the static buffer. */
	snprintf(buf, sizeof(buf), "%u%sB", sz, sfx[sfxidx]);
	return (buf);
}
169 
/*
 * Probe for a CFI-capable flash device: map the memory window, hunt for
 * the "QRY" signature (auto-detecting the port width if the bus glue
 * did not fix it), read the vendor command set and device size, and
 * sanity-check the interface width.  The memory resource is released
 * before returning; cfi_attach() re-acquires it.
 */
int
cfi_probe(device_t dev)
{
	char desc[80];
	struct cfi_softc *sc;
	char *vend_str;
	int error;
	uint16_t iface, vend;

	sc = device_get_softc(dev);
	sc->sc_dev = dev;

	sc->sc_rid = 0;
	sc->sc_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rid,
	    RF_ACTIVE);
	if (sc->sc_res == NULL)
		return (ENXIO);

	sc->sc_tag = rman_get_bustag(sc->sc_res);
	sc->sc_handle = rman_get_bushandle(sc->sc_res);

	/*
	 * Unknown width: try 1-, 2- and 4-byte ports in turn until the
	 * 'Q' of the CFI signature shows up.
	 */
	if (sc->sc_width == 0) {
		sc->sc_width = 1;
		while (sc->sc_width <= 4) {
			if (cfi_read_qry(sc, CFI_QRY_IDENT) == 'Q')
				break;
			sc->sc_width <<= 1;
		}
	} else if (cfi_read_qry(sc, CFI_QRY_IDENT) != 'Q') {
		error = ENXIO;
		goto out;
	}
	/* No width answered with 'Q': not a CFI device. */
	if (sc->sc_width > 4) {
		error = ENXIO;
		goto out;
	}

	/* We got a Q. Check if we also have the R and the Y. */
	if (cfi_read_qry(sc, CFI_QRY_IDENT + 1) != 'R' ||
	    cfi_read_qry(sc, CFI_QRY_IDENT + 2) != 'Y') {
		error = ENXIO;
		goto out;
	}

	/* Get the vendor and command set (16-bit little-endian pair). */
	vend = cfi_read_qry(sc, CFI_QRY_VEND) |
	    (cfi_read_qry(sc, CFI_QRY_VEND + 1) << 8);

	sc->sc_cmdset = vend;

	switch (vend) {
	case CFI_VEND_AMD_ECS:
	case CFI_VEND_AMD_SCS:
		vend_str = "AMD/Fujitsu";
		break;
	case CFI_VEND_INTEL_ECS:
		vend_str = "Intel/Sharp";
		break;
	case CFI_VEND_INTEL_SCS:
		vend_str = "Intel";
		break;
	case CFI_VEND_MITSUBISHI_ECS:
	case CFI_VEND_MITSUBISHI_SCS:
		vend_str = "Mitsubishi";
		break;
	default:
		vend_str = "Unknown vendor";
		break;
	}

	/* Get the device size (CFI reports the log2 of the byte count). */
	sc->sc_size = 1U << cfi_read_qry(sc, CFI_QRY_SIZE);

	/* Sanity-check the I/F */
	iface = cfi_read_qry(sc, CFI_QRY_IFACE) |
	    (cfi_read_qry(sc, CFI_QRY_IFACE + 1) << 8);

	/*
	 * Adding 1 to iface will give us a bit-wise "switch"
	 * that allows us to test for the interface width by
	 * testing a single bit.
	 */
	iface++;

	error = (iface & sc->sc_width) ? 0 : EINVAL;
	if (error)
		goto out;

	snprintf(desc, sizeof(desc), "%s - %s", vend_str,
	    cfi_fmtsize(sc->sc_size));
	device_set_desc_copy(dev, desc);

 out:
	bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rid, sc->sc_res);
	return (error);
}
266 
267 int
268 cfi_attach(device_t dev)
269 {
270 	struct cfi_softc *sc;
271 	u_int blksz, blocks;
272 	u_int r, u;
273 	uint64_t mtoexp, ttoexp;
274 #ifdef CFI_SUPPORT_STRATAFLASH
275 	uint64_t ppr;
276 	char name[KENV_MNAMELEN], value[32];
277 #endif
278 
279 	sc = device_get_softc(dev);
280 	sc->sc_dev = dev;
281 
282 	sc->sc_rid = 0;
283 	sc->sc_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rid,
284 	    RF_ACTIVE);
285 	if (sc->sc_res == NULL)
286 		return (ENXIO);
287 
288 	sc->sc_tag = rman_get_bustag(sc->sc_res);
289 	sc->sc_handle = rman_get_bushandle(sc->sc_res);
290 
291 	/* Get time-out values for erase, write, and buffer write. */
292 	ttoexp = cfi_read_qry(sc, CFI_QRY_TTO_ERASE);
293 	mtoexp = cfi_read_qry(sc, CFI_QRY_MTO_ERASE);
294 	if (ttoexp == 0) {
295 		device_printf(dev, "erase timeout == 0, using 2^16ms\n");
296 		ttoexp = 16;
297 	}
298 	if (ttoexp > 41) {
299 		device_printf(dev, "insane timeout: 2^%jdms\n", ttoexp);
300 		return (EINVAL);
301 	}
302 	if (mtoexp == 0) {
303 		device_printf(dev, "max erase timeout == 0, using 2^%jdms\n",
304 		    ttoexp + 4);
305 		mtoexp = 4;
306 	}
307 	if (ttoexp + mtoexp > 41) {
308 		device_printf(dev, "insane max erase timeout: 2^%jd\n",
309 		    ttoexp + mtoexp);
310 		return (EINVAL);
311 	}
312 	sc->sc_typical_timeouts[CFI_TIMEOUT_ERASE] = SBT_1MS * (1ULL << ttoexp);
313 	sc->sc_max_timeouts[CFI_TIMEOUT_ERASE] =
314 	    sc->sc_typical_timeouts[CFI_TIMEOUT_ERASE] * (1ULL << mtoexp);
315 
316 	ttoexp = cfi_read_qry(sc, CFI_QRY_TTO_WRITE);
317 	mtoexp = cfi_read_qry(sc, CFI_QRY_MTO_WRITE);
318 	if (ttoexp == 0) {
319 		device_printf(dev, "write timeout == 0, using 2^18ns\n");
320 		ttoexp = 18;
321 	}
322 	if (ttoexp > 51) {
323 		device_printf(dev, "insane write timeout: 2^%jdus\n", ttoexp);
324 		return (EINVAL);
325 	}
326 	if (mtoexp == 0) {
327 		device_printf(dev, "max write timeout == 0, using 2^%jdms\n",
328 		    ttoexp + 4);
329 		mtoexp = 4;
330 	}
331 	if (ttoexp + mtoexp > 51) {
332 		device_printf(dev, "insane max write timeout: 2^%jdus\n",
333 		    ttoexp + mtoexp);
334 		return (EINVAL);
335 	}
336 	sc->sc_typical_timeouts[CFI_TIMEOUT_WRITE] = SBT_1US * (1ULL << ttoexp);
337 	sc->sc_max_timeouts[CFI_TIMEOUT_WRITE] =
338 	    sc->sc_typical_timeouts[CFI_TIMEOUT_WRITE] * (1ULL << mtoexp);
339 
340 	ttoexp = cfi_read_qry(sc, CFI_QRY_TTO_BUFWRITE);
341 	mtoexp = cfi_read_qry(sc, CFI_QRY_MTO_BUFWRITE);
342 	/* Don't check for 0, it means not-supported. */
343 	if (ttoexp > 51) {
344 		device_printf(dev, "insane write timeout: 2^%jdus\n", ttoexp);
345 		return (EINVAL);
346 	}
347 	if (ttoexp + mtoexp > 51) {
348 		device_printf(dev, "insane max write timeout: 2^%jdus\n",
349 		    ttoexp + mtoexp);
350 		return (EINVAL);
351 	}
352 	sc->sc_typical_timeouts[CFI_TIMEOUT_BUFWRITE] =
353 	    SBT_1US * (1ULL << cfi_read_qry(sc, CFI_QRY_TTO_BUFWRITE));
354 	sc->sc_max_timeouts[CFI_TIMEOUT_BUFWRITE] =
355 	    sc->sc_typical_timeouts[CFI_TIMEOUT_BUFWRITE] *
356 	    (1ULL << cfi_read_qry(sc, CFI_QRY_MTO_BUFWRITE));
357 
358 	/* Get the maximum size of a multibyte program */
359 	if (sc->sc_typical_timeouts[CFI_TIMEOUT_BUFWRITE] != 0)
360 		sc->sc_maxbuf = 1 << (cfi_read_qry(sc, CFI_QRY_MAXBUF) |
361 		    cfi_read_qry(sc, CFI_QRY_MAXBUF) << 8);
362 	else
363 		sc->sc_maxbuf = 0;
364 
365 	/* Get erase regions. */
366 	sc->sc_regions = cfi_read_qry(sc, CFI_QRY_NREGIONS);
367 	sc->sc_region = malloc(sc->sc_regions * sizeof(struct cfi_region),
368 	    M_TEMP, M_WAITOK | M_ZERO);
369 	for (r = 0; r < sc->sc_regions; r++) {
370 		blocks = cfi_read_qry(sc, CFI_QRY_REGION(r)) |
371 		    (cfi_read_qry(sc, CFI_QRY_REGION(r) + 1) << 8);
372 		sc->sc_region[r].r_blocks = blocks + 1;
373 
374 		blksz = cfi_read_qry(sc, CFI_QRY_REGION(r) + 2) |
375 		    (cfi_read_qry(sc, CFI_QRY_REGION(r) + 3) << 8);
376 		sc->sc_region[r].r_blksz = (blksz == 0) ? 128 :
377 		    blksz * 256;
378 	}
379 
380 	/* Reset the device to a default state. */
381 	cfi_write(sc, 0, CFI_BCS_CLEAR_STATUS);
382 
383 	if (bootverbose) {
384 		device_printf(dev, "[");
385 		for (r = 0; r < sc->sc_regions; r++) {
386 			printf("%ux%s%s", sc->sc_region[r].r_blocks,
387 			    cfi_fmtsize(sc->sc_region[r].r_blksz),
388 			    (r == sc->sc_regions - 1) ? "]\n" : ",");
389 		}
390 	}
391 
392 	u = device_get_unit(dev);
393 	sc->sc_nod = make_dev(&cfi_cdevsw, u, UID_ROOT, GID_WHEEL, 0600,
394 	    "%s%u", cfi_driver_name, u);
395 	sc->sc_nod->si_drv1 = sc;
396 
397 	cfi_add_sysctls(sc);
398 
399 #ifdef CFI_SUPPORT_STRATAFLASH
400 	/*
401 	 * Store the Intel factory PPR in the environment.  In some
402 	 * cases it is the most unique ID on a board.
403 	 */
404 	if (cfi_intel_get_factory_pr(sc, &ppr) == 0) {
405 		if (snprintf(name, sizeof(name), "%s.factory_ppr",
406 		    device_get_nameunit(dev)) < (sizeof(name) - 1) &&
407 		    snprintf(value, sizeof(value), "0x%016jx", ppr) <
408 		    (sizeof(value) - 1))
409 			(void) setenv(name, value);
410 	}
411 #endif
412 
413 	device_add_child(dev, "cfid", -1);
414 	bus_generic_attach(dev);
415 
416 	return (0);
417 }
418 
/*
 * Register read-only sysctl counters reporting how often the typical
 * and maximum erase/write/buffered-write timeouts were exceeded (the
 * counters themselves are bumped in cfi_wait_ready()).  Buffered-write
 * counters are only exposed when the part supports multibyte writes.
 *
 * NOTE(review): the "timout" spelling is part of the user-visible
 * sysctl names; correcting it would change the ABI, so it is kept.
 */
static void
cfi_add_sysctls(struct cfi_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *children;

	ctx = device_get_sysctl_ctx(sc->sc_dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->sc_dev));

	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "typical_erase_timout_count",
	    CTLFLAG_RD, &sc->sc_tto_counts[CFI_TIMEOUT_ERASE],
	    0, "Number of times the typical erase timeout was exceeded");
	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "max_erase_timout_count",
	    CTLFLAG_RD, &sc->sc_mto_counts[CFI_TIMEOUT_ERASE], 0,
	    "Number of times the maximum erase timeout was exceeded");
	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "typical_write_timout_count",
	    CTLFLAG_RD, &sc->sc_tto_counts[CFI_TIMEOUT_WRITE], 0,
	    "Number of times the typical write timeout was exceeded");
	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "max_write_timout_count",
	    CTLFLAG_RD, &sc->sc_mto_counts[CFI_TIMEOUT_WRITE], 0,
	    "Number of times the maximum write timeout was exceeded");
	if (sc->sc_maxbuf > 0) {
		SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		    "typical_bufwrite_timout_count",
		    CTLFLAG_RD, &sc->sc_tto_counts[CFI_TIMEOUT_BUFWRITE], 0,
		    "Number of times the typical buffered write timeout was "
		    "exceeded");
		SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
		    "max_bufwrite_timout_count",
		    CTLFLAG_RD, &sc->sc_mto_counts[CFI_TIMEOUT_BUFWRITE], 0,
		    "Number of times the maximum buffered write timeout was "
		    "exceeded");
	}
}
457 
/*
 * Detach: tear down in reverse order of attach — remove the /dev node
 * first so no new I/O can come in, then free the erase-region table and
 * release the memory window.
 */
int
cfi_detach(device_t dev)
{
	struct cfi_softc *sc;

	sc = device_get_softc(dev);

	destroy_dev(sc->sc_nod);
	free(sc->sc_region, M_TEMP);
	bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rid, sc->sc_res);
	return (0);
}
470 
471 static int
472 cfi_wait_ready(struct cfi_softc *sc, u_int ofs, sbintime_t start,
473     enum cfi_wait_cmd cmd)
474 {
475 	int done, error, tto_exceeded;
476 	uint32_t st0 = 0, st = 0;
477 	sbintime_t now;
478 
479 	done = 0;
480 	error = 0;
481 	tto_exceeded = 0;
482 	while (!done && !error) {
483 		/*
484 		 * Save time before we start so we always do one check
485 		 * after the timeout has expired.
486 		 */
487 		now = sbinuptime();
488 
489 		switch (sc->sc_cmdset) {
490 		case CFI_VEND_INTEL_ECS:
491 		case CFI_VEND_INTEL_SCS:
492 			st = cfi_read(sc, ofs);
493 			done = (st & CFI_INTEL_STATUS_WSMS);
494 			if (done) {
495 				/* NB: bit 0 is reserved */
496 				st &= ~(CFI_INTEL_XSTATUS_RSVD |
497 					CFI_INTEL_STATUS_WSMS |
498 					CFI_INTEL_STATUS_RSVD);
499 				if (st & CFI_INTEL_STATUS_DPS)
500 					error = EPERM;
501 				else if (st & CFI_INTEL_STATUS_PSLBS)
502 					error = EIO;
503 				else if (st & CFI_INTEL_STATUS_ECLBS)
504 					error = ENXIO;
505 				else if (st)
506 					error = EACCES;
507 			}
508 			break;
509 		case CFI_VEND_AMD_SCS:
510 		case CFI_VEND_AMD_ECS:
511 			st0 = cfi_read(sc, ofs);
512 			st = cfi_read(sc, ofs);
513 			done = ((st & 0x40) == (st0 & 0x40)) ? 1 : 0;
514 			break;
515 		}
516 
517 		if (tto_exceeded ||
518 		    now > start + sc->sc_typical_timeouts[cmd]) {
519 			if (!tto_exceeded) {
520 				tto_exceeded = 1;
521 				sc->sc_tto_counts[cmd]++;
522 #ifdef CFI_DEBUG_TIMEOUT
523 				device_printf(sc->sc_dev,
524 				    "typical timeout exceeded (cmd %d)", cmd);
525 #endif
526 			}
527 			if (now > start + sc->sc_max_timeouts[cmd]) {
528 				sc->sc_mto_counts[cmd]++;
529 #ifdef CFI_DEBUG_TIMEOUT
530 				device_printf(sc->sc_dev,
531 				    "max timeout exceeded (cmd %d)", cmd);
532 #endif
533 			}
534 		}
535 	}
536 	if (!done && !error)
537 		error = ETIMEDOUT;
538 	if (error)
539 		printf("\nerror=%d (st 0x%x st0 0x%x)\n", error, st, st0);
540 	return (error);
541 }
542 
543 int
544 cfi_write_block(struct cfi_softc *sc)
545 {
546 	union {
547 		uint8_t		*x8;
548 		uint16_t	*x16;
549 		uint32_t	*x32;
550 	} ptr, cpyprt;
551 	register_t intr;
552 	int error, i, neederase = 0;
553 	uint32_t st;
554 	u_int wlen;
555 	sbintime_t start;
556 
557 	/* Intel flash must be unlocked before modification */
558 	switch (sc->sc_cmdset) {
559 	case CFI_VEND_INTEL_ECS:
560 	case CFI_VEND_INTEL_SCS:
561 		cfi_write(sc, sc->sc_wrofs, CFI_INTEL_LBS);
562 		cfi_write(sc, sc->sc_wrofs, CFI_INTEL_UB);
563 		cfi_write(sc, sc->sc_wrofs, CFI_BCS_READ_ARRAY);
564 		break;
565 	}
566 
567 	/* Check if an erase is required. */
568 	for (i = 0; i < sc->sc_wrbufsz; i++)
569 		if ((sc->sc_wrbuf[i] & sc->sc_wrbufcpy[i]) != sc->sc_wrbuf[i]) {
570 			neederase = 1;
571 			break;
572 		}
573 
574 	if (neederase) {
575 		intr = intr_disable();
576 		start = sbinuptime();
577 		/* Erase the block. */
578 		switch (sc->sc_cmdset) {
579 		case CFI_VEND_INTEL_ECS:
580 		case CFI_VEND_INTEL_SCS:
581 			cfi_write(sc, sc->sc_wrofs, CFI_BCS_BLOCK_ERASE);
582 			cfi_write(sc, sc->sc_wrofs, CFI_BCS_CONFIRM);
583 			break;
584 		case CFI_VEND_AMD_SCS:
585 		case CFI_VEND_AMD_ECS:
586 			cfi_amd_write(sc, sc->sc_wrofs, AMD_ADDR_START,
587 			    CFI_AMD_ERASE_SECTOR);
588 			cfi_amd_write(sc, sc->sc_wrofs, 0, CFI_AMD_BLOCK_ERASE);
589 			break;
590 		default:
591 			/* Better safe than sorry... */
592 			intr_restore(intr);
593 			return (ENODEV);
594 		}
595 		intr_restore(intr);
596 		error = cfi_wait_ready(sc, sc->sc_wrofs, start,
597 		    CFI_TIMEOUT_ERASE);
598 		if (error)
599 			goto out;
600 	} else
601 		error = 0;
602 
603 	/* Write the block using a multibyte write if supported. */
604 	ptr.x8 = sc->sc_wrbuf;
605 	cpyprt.x8 = sc->sc_wrbufcpy;
606 	if (sc->sc_maxbuf > sc->sc_width) {
607 		switch (sc->sc_cmdset) {
608 		case CFI_VEND_INTEL_ECS:
609 		case CFI_VEND_INTEL_SCS:
610 			for (i = 0; i < sc->sc_wrbufsz; i += wlen) {
611 				wlen = MIN(sc->sc_maxbuf, sc->sc_wrbufsz - i);
612 
613 				intr = intr_disable();
614 
615 				start = sbinuptime();
616 				do {
617 					cfi_write(sc, sc->sc_wrofs + i,
618 					    CFI_BCS_BUF_PROG_SETUP);
619 					if (sbinuptime() > start + sc->sc_max_timeouts[CFI_TIMEOUT_BUFWRITE]) {
620 						error = ETIMEDOUT;
621 						goto out;
622 					}
623 					st = cfi_read(sc, sc->sc_wrofs + i);
624 				} while (! (st & CFI_INTEL_STATUS_WSMS));
625 
626 				cfi_write(sc, sc->sc_wrofs + i,
627 				    (wlen / sc->sc_width) - 1);
628 				switch (sc->sc_width) {
629 				case 1:
630 					bus_space_write_region_1(sc->sc_tag,
631 					    sc->sc_handle, sc->sc_wrofs + i,
632 					    ptr.x8 + i, wlen);
633 					break;
634 				case 2:
635 					bus_space_write_region_2(sc->sc_tag,
636 					    sc->sc_handle, sc->sc_wrofs + i,
637 					    ptr.x16 + i / 2, wlen / 2);
638 					break;
639 				case 4:
640 					bus_space_write_region_4(sc->sc_tag,
641 					    sc->sc_handle, sc->sc_wrofs + i,
642 					    ptr.x32 + i / 4, wlen / 4);
643 					break;
644 				}
645 
646 				cfi_write(sc, sc->sc_wrofs + i,
647 				    CFI_BCS_CONFIRM);
648 
649 				intr_restore(intr);
650 
651 				error = cfi_wait_ready(sc, sc->sc_wrofs + i,
652 				    start, CFI_TIMEOUT_BUFWRITE);
653 				if (error != 0)
654 					goto out;
655 			}
656 			goto out;
657 		default:
658 			/* Fall through to single word case */
659 			break;
660 		}
661 
662 	}
663 
664 	/* Write the block one byte/word at a time. */
665 	for (i = 0; i < sc->sc_wrbufsz; i += sc->sc_width) {
666 
667 		/* Avoid writing unless we are actually changing bits */
668 		if (!neederase) {
669 			switch (sc->sc_width) {
670 			case 1:
671 				if(*(ptr.x8 + i) == *(cpyprt.x8 + i))
672 					continue;
673 				break;
674 			case 2:
675 				if(*(ptr.x16 + i / 2) == *(cpyprt.x16 + i / 2))
676 					continue;
677 				break;
678 			case 4:
679 				if(*(ptr.x32 + i / 4) == *(cpyprt.x32 + i / 4))
680 					continue;
681 				break;
682 			}
683 		}
684 
685 		/*
686 		 * Make sure the command to start a write and the
687 		 * actual write happens back-to-back without any
688 		 * excessive delays.
689 		 */
690 		intr = intr_disable();
691 
692 		start = sbinuptime();
693 		switch (sc->sc_cmdset) {
694 		case CFI_VEND_INTEL_ECS:
695 		case CFI_VEND_INTEL_SCS:
696 			cfi_write(sc, sc->sc_wrofs + i, CFI_BCS_PROGRAM);
697 			break;
698 		case CFI_VEND_AMD_SCS:
699 		case CFI_VEND_AMD_ECS:
700 			cfi_amd_write(sc, 0, AMD_ADDR_START, CFI_AMD_PROGRAM);
701 			break;
702 		}
703 		switch (sc->sc_width) {
704 		case 1:
705 			bus_space_write_1(sc->sc_tag, sc->sc_handle,
706 			    sc->sc_wrofs + i, *(ptr.x8 + i));
707 			break;
708 		case 2:
709 			bus_space_write_2(sc->sc_tag, sc->sc_handle,
710 			    sc->sc_wrofs + i, *(ptr.x16 + i / 2));
711 			break;
712 		case 4:
713 			bus_space_write_4(sc->sc_tag, sc->sc_handle,
714 			    sc->sc_wrofs + i, *(ptr.x32 + i / 4));
715 			break;
716 		}
717 
718 		intr_restore(intr);
719 
720 		error = cfi_wait_ready(sc, sc->sc_wrofs, start,
721 		   CFI_TIMEOUT_WRITE);
722 		if (error)
723 			goto out;
724 	}
725 
726 	/* error is 0. */
727 
728  out:
729 	cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
730 
731 	/* Relock Intel flash */
732 	switch (sc->sc_cmdset) {
733 	case CFI_VEND_INTEL_ECS:
734 	case CFI_VEND_INTEL_SCS:
735 		cfi_write(sc, sc->sc_wrofs, CFI_INTEL_LBS);
736 		cfi_write(sc, sc->sc_wrofs, CFI_INTEL_LB);
737 		cfi_write(sc, sc->sc_wrofs, CFI_BCS_READ_ARRAY);
738 		break;
739 	}
740 	return (error);
741 }
742 
743 #ifdef CFI_SUPPORT_STRATAFLASH
744 /*
745  * Intel StrataFlash Protection Register Support.
746  *
747  * The memory includes a 128-bit Protection Register that can be
748  * used for security.  There are two 64-bit segments; one is programmed
749  * at the factory with a unique 64-bit number which is immutable.
750  * The other segment is left blank for User (OEM) programming.
751  * The User/OEM segment is One Time Programmable (OTP).  It can also
752  * be locked to prevent any further writes by setting bit 0 of the
753  * Protection Lock Register (PLR).  The PLR can written only once.
754  */
755 
756 static uint16_t
757 cfi_get16(struct cfi_softc *sc, int off)
758 {
759 	uint16_t v = bus_space_read_2(sc->sc_tag, sc->sc_handle, off<<1);
760 	return v;
761 }
762 
#ifdef CFI_ARMEDANDDANGEROUS
/*
 * Raw 16-bit store into the flash window; 'off' is a 16-bit word index.
 * Used only for the irreversible PR/PLR programming paths, hence the
 * CFI_ARMEDANDDANGEROUS guard.
 */
static void
cfi_put16(struct cfi_softc *sc, int off, uint16_t v)
{
	bus_space_write_2(sc->sc_tag, sc->sc_handle, off<<1, v);
}
#endif
770 
771 /*
772  * Read the factory-defined 64-bit segment of the PR.
773  */
774 int
775 cfi_intel_get_factory_pr(struct cfi_softc *sc, uint64_t *id)
776 {
777 	if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
778 		return EOPNOTSUPP;
779 	KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));
780 
781 	cfi_write(sc, 0, CFI_INTEL_READ_ID);
782 	*id = ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(0)))<<48 |
783 	      ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(1)))<<32 |
784 	      ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(2)))<<16 |
785 	      ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(3)));
786 	cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
787 	return 0;
788 }
789 
790 /*
791  * Read the User/OEM 64-bit segment of the PR.
792  */
793 int
794 cfi_intel_get_oem_pr(struct cfi_softc *sc, uint64_t *id)
795 {
796 	if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
797 		return EOPNOTSUPP;
798 	KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));
799 
800 	cfi_write(sc, 0, CFI_INTEL_READ_ID);
801 	*id = ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(4)))<<48 |
802 	      ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(5)))<<32 |
803 	      ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(6)))<<16 |
804 	      ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(7)));
805 	cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
806 	return 0;
807 }
808 
809 /*
810  * Write the User/OEM 64-bit segment of the PR.
811  * XXX should allow writing individual words/bytes
812  */
int
cfi_intel_set_oem_pr(struct cfi_softc *sc, uint64_t id)
{
#ifdef CFI_ARMEDANDDANGEROUS
	register_t intr;
	int i, error;
	sbintime_t start;
#endif

	/* The PR machinery is Intel StrataFlash (ECS) specific. */
	if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
		return EOPNOTSUPP;
	KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));

#ifdef CFI_ARMEDANDDANGEROUS
	/*
	 * Program the four user words from the highest index down:
	 * PR(7) receives the least-significant 16 bits of 'id' (matching
	 * the assembly order in cfi_intel_get_oem_pr()).  Interrupts are
	 * disabled around each word so the setup command and the data
	 * write reach the part back-to-back.
	 */
	for (i = 7; i >= 4; i--, id >>= 16) {
		intr = intr_disable();
		start = sbinuptime();
		cfi_write(sc, 0, CFI_INTEL_PP_SETUP);
		cfi_put16(sc, CFI_INTEL_PR(i), id&0xffff);
		intr_restore(intr);
		error = cfi_wait_ready(sc, CFI_BCS_READ_STATUS, start,
		    CFI_TIMEOUT_WRITE);
		if (error)
			break;
	}
	/* Always drop back to read-array mode, even on error. */
	cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
	return error;
#else
	device_printf(sc->sc_dev, "%s: OEM PR not set, "
	    "CFI_ARMEDANDDANGEROUS not configured\n", __func__);
	return ENXIO;
#endif
}
846 
847 /*
848  * Read the contents of the Protection Lock Register.
849  */
int
cfi_intel_get_plr(struct cfi_softc *sc, uint32_t *plr)
{
	/* The PLR machinery is Intel StrataFlash (ECS) specific. */
	if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
		return EOPNOTSUPP;
	KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));

	/* Switch to ID mode, fetch the PLR, drop back to array mode. */
	cfi_write(sc, 0, CFI_INTEL_READ_ID);
	*plr = cfi_get16(sc, CFI_INTEL_PLR);
	cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
	return 0;
}
862 
863 /*
864  * Write the Protection Lock Register to lock down the
865  * user-settable segment of the Protection Register.
866  * NOTE: this operation is not reversible.
867  */
868 int
869 cfi_intel_set_plr(struct cfi_softc *sc)
870 {
871 #ifdef CFI_ARMEDANDDANGEROUS
872 	register_t intr;
873 	int error;
874 	sbintime_t start;
875 #endif
876 	if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
877 		return EOPNOTSUPP;
878 	KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));
879 
880 #ifdef CFI_ARMEDANDDANGEROUS
881 	/* worthy of console msg */
882 	device_printf(sc->sc_dev, "set PLR\n");
883 	intr = intr_disable();
884 	binuptime(&start);
885 	cfi_write(sc, 0, CFI_INTEL_PP_SETUP);
886 	cfi_put16(sc, CFI_INTEL_PLR, 0xFFFD);
887 	intr_restore(intr);
888 	error = cfi_wait_ready(sc, CFI_BCS_READ_STATUS, start,
889 	    CFI_TIMEOUT_WRITE);
890 	cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
891 	return error;
892 #else
893 	device_printf(sc->sc_dev, "%s: PLR not set, "
894 	    "CFI_ARMEDANDDANGEROUS not configured\n", __func__);
895 	return ENXIO;
896 #endif
897 }
898 #endif /* CFI_SUPPORT_STRATAFLASH */
899