xref: /freebsd/sys/dev/cfi/cfi_core.c (revision c243e4902be8df1e643c76b5f18b68bb77cc5268)
1 /*-
2  * Copyright (c) 2007, Juniper Networks, Inc.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. Neither the name of the author nor the names of any co-contributors
14  *    may be used to endorse or promote products derived from this software
15  *    without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
22  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
23  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
24  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
25  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  */
29 
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32 
33 #include "opt_cfi.h"
34 
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/bus.h>
38 #include <sys/conf.h>
39 #include <sys/endian.h>
40 #include <sys/kernel.h>
41 #include <sys/malloc.h>
42 #include <sys/module.h>
43 #include <sys/rman.h>
44 #include <sys/sysctl.h>
45 
46 #include <machine/bus.h>
47 
48 #include <dev/cfi/cfi_reg.h>
49 #include <dev/cfi/cfi_var.h>
50 
/* Character-device entry points, defined elsewhere in the driver. */
extern struct cdevsw cfi_cdevsw;

/* Driver name and devclass handles shared with the bus attachment glue. */
char cfi_driver_name[] = "cfi";
devclass_t cfi_devclass;
devclass_t cfi_diskclass;
56 
57 uint32_t
58 cfi_read_raw(struct cfi_softc *sc, u_int ofs)
59 {
60 	uint32_t val;
61 
62 	ofs &= ~(sc->sc_width - 1);
63 	switch (sc->sc_width) {
64 	case 1:
65 		val = bus_space_read_1(sc->sc_tag, sc->sc_handle, ofs);
66 		break;
67 	case 2:
68 		val = bus_space_read_2(sc->sc_tag, sc->sc_handle, ofs);
69 		break;
70 	case 4:
71 		val = bus_space_read_4(sc->sc_tag, sc->sc_handle, ofs);
72 		break;
73 	default:
74 		val = ~0;
75 		break;
76 	}
77 	return (val);
78 }
79 
80 uint32_t
81 cfi_read(struct cfi_softc *sc, u_int ofs)
82 {
83 	uint32_t val;
84 	uint16_t sval;
85 
86 	ofs &= ~(sc->sc_width - 1);
87 	switch (sc->sc_width) {
88 	case 1:
89 		val = bus_space_read_1(sc->sc_tag, sc->sc_handle, ofs);
90 		break;
91 	case 2:
92 		sval = bus_space_read_2(sc->sc_tag, sc->sc_handle, ofs);
93 		val = le16toh(sval);
94 		break;
95 	case 4:
96 		val = bus_space_read_4(sc->sc_tag, sc->sc_handle, ofs);
97 		val = le32toh(val);
98 		break;
99 	default:
100 		val = ~0;
101 		break;
102 	}
103 	return (val);
104 }
105 
106 static void
107 cfi_write(struct cfi_softc *sc, u_int ofs, u_int val)
108 {
109 
110 	ofs &= ~(sc->sc_width - 1);
111 	switch (sc->sc_width) {
112 	case 1:
113 		bus_space_write_1(sc->sc_tag, sc->sc_handle, ofs, val);
114 		break;
115 	case 2:
116 		bus_space_write_2(sc->sc_tag, sc->sc_handle, ofs, htole16(val));
117 		break;
118 	case 4:
119 		bus_space_write_4(sc->sc_tag, sc->sc_handle, ofs, htole32(val));
120 		break;
121 	}
122 }
123 
/*
 * Read one byte of CFI query information at query offset 'ofs'.
 * The device is switched into query mode for the read and returned
 * to read-array mode afterwards.
 */
uint8_t
cfi_read_qry(struct cfi_softc *sc, u_int ofs)
{
	uint8_t val;

	/* Enter CFI query mode. */
	cfi_write(sc, CFI_QRY_CMD_ADDR * sc->sc_width, CFI_QRY_CMD_DATA);
	/* Query offsets are in device words; scale by the interface width. */
	val = cfi_read(sc, ofs * sc->sc_width);
	/* Back to normal read-array mode. */
	cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
	return (val);
}
134 
/*
 * Issue an AMD-style command: the two-cycle unlock sequence followed
 * by the command byte 'data' written at 'ofs' + 'addr'.  The three
 * writes must happen in exactly this order.
 */
static void
cfi_amd_write(struct cfi_softc *sc, u_int ofs, u_int addr, u_int data)
{

	cfi_write(sc, ofs + AMD_ADDR_START, CFI_AMD_UNLOCK);
	cfi_write(sc, ofs + AMD_ADDR_ACK, CFI_AMD_UNLOCK_ACK);
	cfi_write(sc, ofs + addr, data);
}
143 
/*
 * Format a byte count as a short human-readable string ("512B", "64KB",
 * "1MB", ...).  Returns a pointer to a static buffer: the result is
 * overwritten by the next call and the function is not re-entrant.
 */
static char *
cfi_fmtsize(uint32_t sz)
{
	static char buf[8];
	static const char *sfx[] = { "", "K", "M", "G" };
	int sfxidx;

	/* Scale down by 1024 until the value fits, up to the "G" suffix. */
	sfxidx = 0;
	while (sfxidx < 3 && sz > 1023) {
		sz /= 1024;
		sfxidx++;
	}

	/* Bound the write explicitly (was unbounded sprintf). */
	snprintf(buf, sizeof(buf), "%u%sB", sz, sfx[sfxidx]);
	return (buf);
}
160 
/*
 * Probe for a CFI-compliant flash device.  Maps the memory resource,
 * performs the CFI query handshake ("QRY" signature), records the
 * command set and device size in the softc and builds a description
 * string.  The memory resource is released before returning; attach
 * re-allocates it.
 */
int
cfi_probe(device_t dev)
{
	char desc[80];
	struct cfi_softc *sc;
	char *vend_str;
	int error;
	uint16_t iface, vend;

	sc = device_get_softc(dev);
	sc->sc_dev = dev;

	sc->sc_rid = 0;
	sc->sc_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rid,
	    RF_ACTIVE);
	if (sc->sc_res == NULL)
		return (ENXIO);

	sc->sc_tag = rman_get_bustag(sc->sc_res);
	sc->sc_handle = rman_get_bushandle(sc->sc_res);

	/*
	 * If the bus attachment did not fix the interface width, detect
	 * it by trying 1-, 2- and 4-byte accesses until the query
	 * signature answers with 'Q'.
	 */
	if (sc->sc_width == 0) {
		sc->sc_width = 1;
		while (sc->sc_width <= 4) {
			if (cfi_read_qry(sc, CFI_QRY_IDENT) == 'Q')
				break;
			sc->sc_width <<= 1;
		}
	} else if (cfi_read_qry(sc, CFI_QRY_IDENT) != 'Q') {
		error = ENXIO;
		goto out;
	}
	/* Width detection ran off the end without finding a 'Q'. */
	if (sc->sc_width > 4) {
		error = ENXIO;
		goto out;
	}

	/* We got a Q. Check if we also have the R and the Y. */
	if (cfi_read_qry(sc, CFI_QRY_IDENT + 1) != 'R' ||
	    cfi_read_qry(sc, CFI_QRY_IDENT + 2) != 'Y') {
		error = ENXIO;
		goto out;
	}

	/* Get the vendor and command set (16-bit little-endian value). */
	vend = cfi_read_qry(sc, CFI_QRY_VEND) |
	    (cfi_read_qry(sc, CFI_QRY_VEND + 1) << 8);

	sc->sc_cmdset = vend;

	switch (vend) {
	case CFI_VEND_AMD_ECS:
	case CFI_VEND_AMD_SCS:
		vend_str = "AMD/Fujitsu";
		break;
	case CFI_VEND_INTEL_ECS:
		vend_str = "Intel/Sharp";
		break;
	case CFI_VEND_INTEL_SCS:
		vend_str = "Intel";
		break;
	case CFI_VEND_MITSUBISHI_ECS:
	case CFI_VEND_MITSUBISHI_SCS:
		vend_str = "Mitsubishi";
		break;
	default:
		vend_str = "Unknown vendor";
		break;
	}

	/* Get the device size; the query byte holds log2 of the size. */
	sc->sc_size = 1U << cfi_read_qry(sc, CFI_QRY_SIZE);

	/* Sanity-check the I/F */
	iface = cfi_read_qry(sc, CFI_QRY_IFACE) |
	    (cfi_read_qry(sc, CFI_QRY_IFACE + 1) << 8);

	/*
	 * Adding 1 to iface will give us a bit-wise "switch"
	 * that allows us to test for the interface width by
	 * testing a single bit.
	 */
	iface++;

	error = (iface & sc->sc_width) ? 0 : EINVAL;
	if (error)
		goto out;

	snprintf(desc, sizeof(desc), "%s - %s", vend_str,
	    cfi_fmtsize(sc->sc_size));
	device_set_desc_copy(dev, desc);

 out:
	/* Probe must not hold on to the resource; attach re-acquires it. */
	bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rid, sc->sc_res);
	return (error);
}
257 
/*
 * Attach the flash device: map the memory resource, read the erase and
 * write time-outs and the erase-region layout from the CFI query table,
 * create the /dev/cfi%u character device and attach the "cfid" child.
 */
int
cfi_attach(device_t dev)
{
	struct cfi_softc *sc;
	u_int blksz, blocks;
	u_int r, u;

	sc = device_get_softc(dev);
	sc->sc_dev = dev;

	sc->sc_rid = 0;
	sc->sc_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rid,
	    RF_ACTIVE);
	if (sc->sc_res == NULL)
		return (ENXIO);

	sc->sc_tag = rman_get_bustag(sc->sc_res);
	sc->sc_handle = rman_get_bushandle(sc->sc_res);

	/*
	 * Get time-out values for erase and write.  The query bytes are
	 * exponents: the typical time (TTO) scaled by the maximum
	 * multiplier (MTO) gives the overall time-out used for polling.
	 */
	sc->sc_write_timeout = 1 << cfi_read_qry(sc, CFI_QRY_TTO_WRITE);
	sc->sc_erase_timeout = 1 << cfi_read_qry(sc, CFI_QRY_TTO_ERASE);
	sc->sc_write_timeout *= 1 << cfi_read_qry(sc, CFI_QRY_MTO_WRITE);
	sc->sc_erase_timeout *= 1 << cfi_read_qry(sc, CFI_QRY_MTO_ERASE);

	/* Get erase regions. */
	sc->sc_regions = cfi_read_qry(sc, CFI_QRY_NREGIONS);
	sc->sc_region = malloc(sc->sc_regions * sizeof(struct cfi_region),
	    M_TEMP, M_WAITOK | M_ZERO);
	for (r = 0; r < sc->sc_regions; r++) {
		/* Block count is stored as (count - 1), little-endian. */
		blocks = cfi_read_qry(sc, CFI_QRY_REGION(r)) |
		    (cfi_read_qry(sc, CFI_QRY_REGION(r) + 1) << 8);
		sc->sc_region[r].r_blocks = blocks + 1;

		/* Block size is in 256-byte units; 0 means 128 bytes. */
		blksz = cfi_read_qry(sc, CFI_QRY_REGION(r) + 2) |
		    (cfi_read_qry(sc, CFI_QRY_REGION(r) + 3) << 8);
		sc->sc_region[r].r_blksz = (blksz == 0) ? 128 :
		    blksz * 256;
	}

	/* Reset the device to a default state. */
	cfi_write(sc, 0, CFI_BCS_CLEAR_STATUS);

	if (bootverbose) {
		/*
		 * NOTE(review): if sc_regions is 0 the opening '[' is
		 * printed without a closing ']' or newline — confirm
		 * whether a zero-region device is possible here.
		 */
		device_printf(dev, "[");
		for (r = 0; r < sc->sc_regions; r++) {
			printf("%ux%s%s", sc->sc_region[r].r_blocks,
			    cfi_fmtsize(sc->sc_region[r].r_blksz),
			    (r == sc->sc_regions - 1) ? "]\n" : ",");
		}
	}

	/* Create the raw character device /dev/cfi<unit>. */
	u = device_get_unit(dev);
	sc->sc_nod = make_dev(&cfi_cdevsw, u, UID_ROOT, GID_WHEEL, 0600,
	    "%s%u", cfi_driver_name, u);
	sc->sc_nod->si_drv1 = sc;

	/* Attach the disk-style interface as a child device. */
	device_add_child(dev, "cfid", -1);
	bus_generic_attach(dev);

	return (0);
}
320 
/*
 * Detach: tear down in reverse order of attach — remove the character
 * device, free the erase-region array and release the memory resource.
 */
int
cfi_detach(device_t dev)
{
	struct cfi_softc *sc;

	sc = device_get_softc(dev);

	destroy_dev(sc->sc_nod);
	free(sc->sc_region, M_TEMP);
	bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rid, sc->sc_res);
	return (0);
}
333 
/*
 * Poll the device at 'ofs' until the pending program/erase operation
 * completes, reports an error, or the time-out expires.  'timeout' is
 * polled as timeout * 10 iterations with a 100us delay each (i.e.
 * roughly 'timeout' milliseconds total).
 * Returns 0 on success or an errno value on failure/time-out.
 */
static int
cfi_wait_ready(struct cfi_softc *sc, u_int ofs, u_int timeout)
{
	int done, error;
	uint32_t st0 = 0, st = 0;

	done = 0;
	error = 0;
	timeout *= 10;
	while (!done && !error && timeout) {
		DELAY(100);
		timeout--;

		switch (sc->sc_cmdset) {
		case CFI_VEND_INTEL_ECS:
		case CFI_VEND_INTEL_SCS:
			/* Intel: read and decode the status register. */
			st = cfi_read(sc, ofs);
			done = (st & CFI_INTEL_STATUS_WSMS);
			if (done) {
				/* NB: bit 0 is reserved */
				st &= ~(CFI_INTEL_XSTATUS_RSVD |
					CFI_INTEL_STATUS_WSMS |
					CFI_INTEL_STATUS_RSVD);
				/* Map remaining status bits to errnos. */
				if (st & CFI_INTEL_STATUS_DPS)
					error = EPERM;
				else if (st & CFI_INTEL_STATUS_PSLBS)
					error = EIO;
				else if (st & CFI_INTEL_STATUS_ECLBS)
					error = ENXIO;
				else if (st)
					error = EACCES;
			}
			break;
		case CFI_VEND_AMD_SCS:
		case CFI_VEND_AMD_ECS:
			/*
			 * AMD: bit 0x40 toggles between successive reads
			 * while an operation is in progress; the device is
			 * ready once two reads return the same value.
			 */
			st0 = cfi_read(sc, ofs);
			st = cfi_read(sc, ofs);
			done = ((st & 0x40) == (st0 & 0x40)) ? 1 : 0;
			break;
		}
	}
	if (!done && !error)
		error = ETIMEDOUT;
	if (error)
		printf("\nerror=%d (st 0x%x st0 0x%x)\n", error, st, st0);
	return (error);
}
381 
/*
 * Erase the block at sc_wrofs and program it from the staging buffer
 * sc_wrbuf (sc_wrbufsz bytes), one interface-width word at a time.
 * Returns 0 on success or an errno value; the device is returned to
 * read-array mode in either case.
 */
int
cfi_write_block(struct cfi_softc *sc)
{
	/* View of the staging buffer at each supported access width. */
	union {
		uint8_t		*x8;
		uint16_t	*x16;
		uint32_t	*x32;
	} ptr;
	register_t intr;
	int error, i;

	/* Erase the block. */
	switch (sc->sc_cmdset) {
	case CFI_VEND_INTEL_ECS:
	case CFI_VEND_INTEL_SCS:
		cfi_write(sc, sc->sc_wrofs, CFI_BCS_BLOCK_ERASE);
		cfi_write(sc, sc->sc_wrofs, CFI_BCS_CONFIRM);
		break;
	case CFI_VEND_AMD_SCS:
	case CFI_VEND_AMD_ECS:
		/* AMD parts require the unlock sequence per command. */
		cfi_amd_write(sc, sc->sc_wrofs, AMD_ADDR_START,
		    CFI_AMD_ERASE_SECTOR);
		cfi_amd_write(sc, sc->sc_wrofs, 0, CFI_AMD_BLOCK_ERASE);
		break;
	default:
		/* Better safe than sorry... */
		return (ENODEV);
	}
	error = cfi_wait_ready(sc, sc->sc_wrofs, sc->sc_erase_timeout);
	if (error)
		goto out;

	/* Write the block. */
	ptr.x8 = sc->sc_wrbuf;
	for (i = 0; i < sc->sc_wrbufsz; i += sc->sc_width) {

		/*
		 * Make sure the command to start a write and the
		 * actual write happens back-to-back without any
		 * excessive delays.
		 */
		intr = intr_disable();

		switch (sc->sc_cmdset) {
		case CFI_VEND_INTEL_ECS:
		case CFI_VEND_INTEL_SCS:
			cfi_write(sc, sc->sc_wrofs + i, CFI_BCS_PROGRAM);
			break;
		case CFI_VEND_AMD_SCS:
		case CFI_VEND_AMD_ECS:
			cfi_amd_write(sc, 0, AMD_ADDR_START, CFI_AMD_PROGRAM);
			break;
		}
		/* Program one interface-width word of data. */
		switch (sc->sc_width) {
		case 1:
			bus_space_write_1(sc->sc_tag, sc->sc_handle,
			    sc->sc_wrofs + i, *(ptr.x8)++);
			break;
		case 2:
			bus_space_write_2(sc->sc_tag, sc->sc_handle,
			    sc->sc_wrofs + i, *(ptr.x16)++);
			break;
		case 4:
			bus_space_write_4(sc->sc_tag, sc->sc_handle,
			    sc->sc_wrofs + i, *(ptr.x32)++);
			break;
		}

		intr_restore(intr);

		error = cfi_wait_ready(sc, sc->sc_wrofs, sc->sc_write_timeout);
		if (error)
			goto out;
	}

	/* error is 0. */

 out:
	/* Always leave the device in read-array mode. */
	cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
	return (error);
}
463 
464 #ifdef CFI_SUPPORT_STRATAFLASH
465 /*
466  * Intel StrataFlash Protection Register Support.
467  *
468  * The memory includes a 128-bit Protection Register that can be
469  * used for security.  There are two 64-bit segments; one is programmed
470  * at the factory with a unique 64-bit number which is immutable.
471  * The other segment is left blank for User (OEM) programming.
472  * The User/OEM segment is One Time Programmable (OTP).  It can also
473  * be locked to prevent any further writes by setting bit 0 of the
474  * Protection Lock Register (PLR).  The PLR can written only once.
475  */
476 
477 static uint16_t
478 cfi_get16(struct cfi_softc *sc, int off)
479 {
480 	uint16_t v = bus_space_read_2(sc->sc_tag, sc->sc_handle, off<<1);
481 	return v;
482 }
483 
#ifdef CFI_ARMEDANDDANGEROUS
/*
 * Write a 16-bit word to the device.  'off' is a 16-bit-word index;
 * it is converted to a byte offset for the bus access.
 */
static void
cfi_put16(struct cfi_softc *sc, int off, uint16_t v)
{
	bus_space_write_2(sc->sc_tag, sc->sc_handle, off<<1, v);
}
#endif
491 
492 /*
493  * Read the factory-defined 64-bit segment of the PR.
494  */
495 int
496 cfi_intel_get_factory_pr(struct cfi_softc *sc, uint64_t *id)
497 {
498 	if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
499 		return EOPNOTSUPP;
500 	KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));
501 
502 	cfi_write(sc, 0, CFI_INTEL_READ_ID);
503 	*id = ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(0)))<<48 |
504 	      ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(1)))<<32 |
505 	      ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(2)))<<16 |
506 	      ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(3)));
507 	cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
508 	return 0;
509 }
510 
511 /*
512  * Read the User/OEM 64-bit segment of the PR.
513  */
514 int
515 cfi_intel_get_oem_pr(struct cfi_softc *sc, uint64_t *id)
516 {
517 	if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
518 		return EOPNOTSUPP;
519 	KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));
520 
521 	cfi_write(sc, 0, CFI_INTEL_READ_ID);
522 	*id = ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(4)))<<48 |
523 	      ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(5)))<<32 |
524 	      ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(6)))<<16 |
525 	      ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(7)));
526 	cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
527 	return 0;
528 }
529 
/*
 * Write the User/OEM 64-bit segment of the PR.
 * XXX should allow writing individual words/bytes
 * NOTE: the OEM segment is one-time programmable; this is compiled
 * out unless CFI_ARMEDANDDANGEROUS is configured.
 */
int
cfi_intel_set_oem_pr(struct cfi_softc *sc, uint64_t id)
{
#ifdef CFI_ARMEDANDDANGEROUS
	register_t intr;
	int i, error;
#endif

	if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
		return EOPNOTSUPP;
	KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));

#ifdef CFI_ARMEDANDDANGEROUS
	/*
	 * Program PR words 7 down to 4, consuming 'id' 16 bits at a
	 * time from the least significant end.  Interrupts are disabled
	 * so the setup command and the data write are back-to-back.
	 */
	for (i = 7; i >= 4; i--, id >>= 16) {
		intr = intr_disable();
		cfi_write(sc, 0, CFI_INTEL_PP_SETUP);
		cfi_put16(sc, CFI_INTEL_PR(i), id&0xffff);
		intr_restore(intr);
		error = cfi_wait_ready(sc, CFI_BCS_READ_STATUS,
		    sc->sc_write_timeout);
		if (error)
			break;
	}
	cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
	return error;
#else
	device_printf(sc->sc_dev, "%s: OEM PR not set, "
	    "CFI_ARMEDANDDANGEROUS not configured\n", __func__);
	return ENXIO;
#endif
}
565 
/*
 * Read the contents of the Protection Lock Register.
 */
int
cfi_intel_get_plr(struct cfi_softc *sc, uint32_t *plr)
{
	if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
		return EOPNOTSUPP;
	KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));

	/* Switch to ID mode, read the PLR word, return to array mode. */
	cfi_write(sc, 0, CFI_INTEL_READ_ID);
	*plr = cfi_get16(sc, CFI_INTEL_PLR);
	cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
	return 0;
}
581 
/*
 * Write the Protection Lock Register to lock down the
 * user-settable segment of the Protection Register.
 * NOTE: this operation is not reversible.
 */
int
cfi_intel_set_plr(struct cfi_softc *sc)
{
#ifdef CFI_ARMEDANDDANGEROUS
	register_t intr;
	int error;
#endif
	if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
		return EOPNOTSUPP;
	KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));

#ifdef CFI_ARMEDANDDANGEROUS
	/* worthy of console msg */
	device_printf(sc->sc_dev, "set PLR\n");
	/* Setup and write back-to-back with interrupts disabled. */
	intr = intr_disable();
	cfi_write(sc, 0, CFI_INTEL_PP_SETUP);
	/* 0xFFFD clears bit 1 to lock the OEM segment permanently. */
	cfi_put16(sc, CFI_INTEL_PLR, 0xFFFD);
	intr_restore(intr);
	error = cfi_wait_ready(sc, CFI_BCS_READ_STATUS, sc->sc_write_timeout);
	cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
	return error;
#else
	device_printf(sc->sc_dev, "%s: PLR not set, "
	    "CFI_ARMEDANDDANGEROUS not configured\n", __func__);
	return ENXIO;
#endif
}
614 #endif /* CFI_SUPPORT_STRATAFLASH */
615