/*-
 * Copyright (c) 2007, Juniper Networks, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_cfi.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/sysctl.h>

#include <machine/bus.h>

#include <dev/cfi/cfi_reg.h>
#include <dev/cfi/cfi_var.h>

extern struct cdevsw cfi_cdevsw;

char cfi_driver_name[] = "cfi";
devclass_t cfi_devclass;
devclass_t cfi_diskclass;

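/*
 * Read one item from the flash using the configured port width
 * (1, 2 or 4 bytes).  The offset is aligned down to that width;
 * an unsupported width reads back as all-ones.
 */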
uint32_t
cfi_read(struct cfi_softc *sc, u_int ofs)
{
	uint32_t val;

	ofs &= ~(sc->sc_width - 1);
	switch (sc->sc_width) {
	case 1:
		val = bus_space_read_1(sc->sc_tag, sc->sc_handle, ofs);
		break;
	case 2:
		val = bus_space_read_2(sc->sc_tag, sc->sc_handle, ofs);
		break;
	case 4:
		val = bus_space_read_4(sc->sc_tag, sc->sc_handle, ofs);
		break;
	default:
		val = ~0;
		break;
	}
	return (val);
}

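/*
 * Write one item to the flash using the configured port width.
 * Offsets are aligned the same way as in cfi_read(); writes with an
 * unsupported width are silently dropped.
 */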
static void
cfi_write(struct cfi_softc *sc, u_int ofs, u_int val)
{

	ofs &= ~(sc->sc_width - 1);
	switch (sc->sc_width) {
	case 1:
		bus_space_write_1(sc->sc_tag, sc->sc_handle, ofs, val);
		break;
	case 2:
		bus_space_write_2(sc->sc_tag, sc->sc_handle, ofs, val);
		break;
	case 4:
		bus_space_write_4(sc->sc_tag, sc->sc_handle, ofs, val);
		break;
	}
}

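/*
 * Read a single byte from the CFI query table: switch the chip into
 * query mode, fetch the byte at the given table offset (scaled by the
 * port width) and drop back into read-array mode.
 */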
uint8_t
cfi_read_qry(struct cfi_softc *sc, u_int ofs)
{
	uint8_t val;

	cfi_write(sc, CFI_QRY_CMD_ADDR * sc->sc_width, CFI_QRY_CMD_DATA);
	val = cfi_read(sc, ofs * sc->sc_width);
	cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
	return (val);
}

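/*
 * Issue an AMD-style command: the two-cycle unlock sequence followed
 * by the command write itself.
 */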
static void
cfi_amd_write(struct cfi_softc *sc, u_int ofs, u_int addr, u_int data)
{

	cfi_write(sc, ofs + AMD_ADDR_START, CFI_AMD_UNLOCK);
	cfi_write(sc, ofs + AMD_ADDR_ACK, CFI_AMD_UNLOCK_ACK);
	cfi_write(sc, ofs + addr, data);
}

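/*
 * Format a byte count as a human-readable string such as "512KB" or
 * "4MB".  The result lives in a static buffer and is only intended
 * for the probe/attach messages below.
 */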
static char *
cfi_fmtsize(uint32_t sz)
{
	static char buf[8];
	static const char *sfx[] = { "", "K", "M", "G" };
	int sfxidx;

	sfxidx = 0;
	while (sfxidx < 3 && sz > 1023) {
		sz /= 1024;
		sfxidx++;
	}

	sprintf(buf, "%u%sB", sz, sfx[sfxidx]);
	return (buf);
}

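/*
 * Probe for a CFI-capable flash: find the port width by looking for
 * the "QRY" signature in query mode, then read the command set
 * (vendor), the device size and the interface description from the
 * query table.
 */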
int
cfi_probe(device_t dev)
{
	char desc[80];
	struct cfi_softc *sc;
	char *vend_str;
	int error;
	uint16_t iface, vend;

	sc = device_get_softc(dev);
	sc->sc_dev = dev;

	sc->sc_rid = 0;
	sc->sc_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rid,
	    RF_ACTIVE);
	if (sc->sc_res == NULL)
		return (ENXIO);

	sc->sc_tag = rman_get_bustag(sc->sc_res);
	sc->sc_handle = rman_get_bushandle(sc->sc_res);

	if (sc->sc_width == 0) {
		sc->sc_width = 1;
		while (sc->sc_width <= 4) {
			if (cfi_read_qry(sc, CFI_QRY_IDENT) == 'Q')
				break;
			sc->sc_width <<= 1;
		}
	} else if (cfi_read_qry(sc, CFI_QRY_IDENT) != 'Q') {
		error = ENXIO;
		goto out;
	}
	if (sc->sc_width > 4) {
		error = ENXIO;
		goto out;
	}

	/* We got a Q. Check if we also have the R and the Y. */
	if (cfi_read_qry(sc, CFI_QRY_IDENT + 1) != 'R' ||
	    cfi_read_qry(sc, CFI_QRY_IDENT + 2) != 'Y') {
		error = ENXIO;
		goto out;
	}

	/* Get the vendor and command set. */
	vend = cfi_read_qry(sc, CFI_QRY_VEND) |
	    (cfi_read_qry(sc, CFI_QRY_VEND + 1) << 8);

	sc->sc_cmdset = vend;

	switch (vend) {
	case CFI_VEND_AMD_ECS:
	case CFI_VEND_AMD_SCS:
		vend_str = "AMD/Fujitsu";
		break;
	case CFI_VEND_INTEL_ECS:
		vend_str = "Intel/Sharp";
		break;
	case CFI_VEND_INTEL_SCS:
		vend_str = "Intel";
		break;
	case CFI_VEND_MITSUBISHI_ECS:
	case CFI_VEND_MITSUBISHI_SCS:
		vend_str = "Mitsubishi";
		break;
	default:
		vend_str = "Unknown vendor";
		break;
	}

	/* Get the device size. */
	sc->sc_size = 1U << cfi_read_qry(sc, CFI_QRY_SIZE);

	/* Sanity-check the I/F */
	iface = cfi_read_qry(sc, CFI_QRY_IFACE) |
	    (cfi_read_qry(sc, CFI_QRY_IFACE + 1) << 8);

	/*
	 * Adding 1 to iface will give us a bit-wise "switch"
	 * that allows us to test for the interface width by
	 * testing a single bit.
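	 * For example, interface code 1 (an x16-only part) becomes 2 and
	 * matches an x16 port width (sc_width == 2), while code 2
	 * (x8/x16 capable) becomes 3 and matches either width.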
	 */
	iface++;

	error = (iface & sc->sc_width) ? 0 : EINVAL;
	if (error)
		goto out;

	snprintf(desc, sizeof(desc), "%s - %s", vend_str,
	    cfi_fmtsize(sc->sc_size));
	device_set_desc_copy(dev, desc);

 out:
	bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rid, sc->sc_res);
	return (error);
}

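/*
 * Attach the flash: record the program/erase timeouts and the erase
 * region layout from the query table, create the /dev/cfiN character
 * device and add a "cfid" child (the disk interface) to this device.
 */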
int
cfi_attach(device_t dev)
{
	struct cfi_softc *sc;
	u_int blksz, blocks;
	u_int r, u;

	sc = device_get_softc(dev);
	sc->sc_dev = dev;

	sc->sc_rid = 0;
	sc->sc_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rid,
	    RF_ACTIVE);
	if (sc->sc_res == NULL)
		return (ENXIO);

	sc->sc_tag = rman_get_bustag(sc->sc_res);
	sc->sc_handle = rman_get_bushandle(sc->sc_res);

	/* Get time-out values for erase and write. */
	sc->sc_write_timeout = 1 << cfi_read_qry(sc, CFI_QRY_TTO_WRITE);
	sc->sc_erase_timeout = 1 << cfi_read_qry(sc, CFI_QRY_TTO_ERASE);
	sc->sc_write_timeout *= 1 << cfi_read_qry(sc, CFI_QRY_MTO_WRITE);
	sc->sc_erase_timeout *= 1 << cfi_read_qry(sc, CFI_QRY_MTO_ERASE);

	/* Get erase regions. */
	sc->sc_regions = cfi_read_qry(sc, CFI_QRY_NREGIONS);
	sc->sc_region = malloc(sc->sc_regions * sizeof(struct cfi_region),
	    M_TEMP, M_WAITOK | M_ZERO);
	for (r = 0; r < sc->sc_regions; r++) {
		blocks = cfi_read_qry(sc, CFI_QRY_REGION(r)) |
		    (cfi_read_qry(sc, CFI_QRY_REGION(r) + 1) << 8);
		sc->sc_region[r].r_blocks = blocks + 1;

		blksz = cfi_read_qry(sc, CFI_QRY_REGION(r) + 2) |
		    (cfi_read_qry(sc, CFI_QRY_REGION(r) + 3) << 8);
		sc->sc_region[r].r_blksz = (blksz == 0) ? 128 :
		    blksz * 256;
	}

	/* Reset the device to a default state. */
	cfi_write(sc, 0, CFI_BCS_CLEAR_STATUS);

	if (bootverbose) {
		device_printf(dev, "[");
		for (r = 0; r < sc->sc_regions; r++) {
			printf("%ux%s%s", sc->sc_region[r].r_blocks,
			    cfi_fmtsize(sc->sc_region[r].r_blksz),
			    (r == sc->sc_regions - 1) ? "]\n" : ",");
		}
	}

	u = device_get_unit(dev);
	sc->sc_nod = make_dev(&cfi_cdevsw, u, UID_ROOT, GID_WHEEL, 0600,
	    "%s%u", cfi_driver_name, u);
	sc->sc_nod->si_drv1 = sc;

	device_add_child(dev, "cfid", -1);
	bus_generic_attach(dev);

	return (0);
}

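/*
 * Detach: tear down the character device and release the erase region
 * array and the memory resource allocated at attach time.
 */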
int
cfi_detach(device_t dev)
{
	struct cfi_softc *sc;

	sc = device_get_softc(dev);

	destroy_dev(sc->sc_nod);
	free(sc->sc_region, M_TEMP);
	bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rid, sc->sc_res);
	return (0);
}

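/*
 * Poll the flash until a pending program or erase operation completes
 * or the timeout expires (polling in 100us steps).  Intel command sets
 * report completion and errors through the status register; for AMD
 * command sets the operation is done once two consecutive reads return
 * the same DQ6 toggle bit.
 */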
static int
cfi_wait_ready(struct cfi_softc *sc, u_int ofs, u_int timeout)
{
	int done, error;
	uint32_t st0 = 0, st = 0;

	done = 0;
	error = 0;
	timeout *= 10;
	while (!done && !error && timeout) {
		DELAY(100);
		timeout--;

		switch (sc->sc_cmdset) {
		case CFI_VEND_INTEL_ECS:
		case CFI_VEND_INTEL_SCS:
			st = cfi_read(sc, ofs);
			done = (st & CFI_INTEL_STATUS_WSMS);
			if (done) {
				/* NB: bit 0 is reserved */
				st &= ~(CFI_INTEL_XSTATUS_RSVD |
					CFI_INTEL_STATUS_WSMS |
					CFI_INTEL_STATUS_RSVD);
				if (st & CFI_INTEL_STATUS_DPS)
					error = EPERM;
				else if (st & CFI_INTEL_STATUS_PSLBS)
					error = EIO;
				else if (st & CFI_INTEL_STATUS_ECLBS)
					error = ENXIO;
				else if (st)
					error = EACCES;
			}
			break;
		case CFI_VEND_AMD_SCS:
		case CFI_VEND_AMD_ECS:
			st0 = cfi_read(sc, ofs);
			st = cfi_read(sc, ofs);
			done = ((st & 0x40) == (st0 & 0x40)) ? 1 : 0;
			break;
		}
	}
	if (!done && !error)
		error = ETIMEDOUT;
	if (error)
		printf("\nerror=%d (st 0x%x st0 0x%x)\n", error, st, st0);
	return (error);
}

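/*
 * Rewrite the block at sc_wrofs from the shadow copy in sc_wrbuf:
 * erase the block first, then program it back one port-width item at
 * a time, waiting for the chip to go ready after the erase and after
 * every programmed item.
 */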
int
cfi_write_block(struct cfi_softc *sc)
{
	union {
		uint8_t		*x8;
		uint16_t	*x16;
		uint32_t	*x32;
	} ptr;
	register_t intr;
	int error, i;

	/* Erase the block. */
	switch (sc->sc_cmdset) {
	case CFI_VEND_INTEL_ECS:
	case CFI_VEND_INTEL_SCS:
		cfi_write(sc, sc->sc_wrofs, CFI_BCS_BLOCK_ERASE);
		cfi_write(sc, sc->sc_wrofs, CFI_BCS_CONFIRM);
		break;
	case CFI_VEND_AMD_SCS:
	case CFI_VEND_AMD_ECS:
		cfi_amd_write(sc, sc->sc_wrofs, AMD_ADDR_START,
		    CFI_AMD_ERASE_SECTOR);
		cfi_amd_write(sc, sc->sc_wrofs, 0, CFI_AMD_BLOCK_ERASE);
		break;
	default:
		/* Better safe than sorry... */
		return (ENODEV);
	}
	error = cfi_wait_ready(sc, sc->sc_wrofs, sc->sc_erase_timeout);
	if (error)
		goto out;

	/* Write the block. */
	ptr.x8 = sc->sc_wrbuf;
	for (i = 0; i < sc->sc_wrbufsz; i += sc->sc_width) {

		/*
		 * Make sure the command to start a write and the
		 * actual write happen back-to-back without any
		 * excessive delays.
		 */
		intr = intr_disable();

		switch (sc->sc_cmdset) {
		case CFI_VEND_INTEL_ECS:
		case CFI_VEND_INTEL_SCS:
			cfi_write(sc, sc->sc_wrofs + i, CFI_BCS_PROGRAM);
			break;
		case CFI_VEND_AMD_SCS:
		case CFI_VEND_AMD_ECS:
			cfi_amd_write(sc, 0, AMD_ADDR_START, CFI_AMD_PROGRAM);
			break;
		}
		switch (sc->sc_width) {
		case 1:
			bus_space_write_1(sc->sc_tag, sc->sc_handle,
			    sc->sc_wrofs + i, *(ptr.x8)++);
			break;
		case 2:
			bus_space_write_2(sc->sc_tag, sc->sc_handle,
			    sc->sc_wrofs + i, *(ptr.x16)++);
			break;
		case 4:
			bus_space_write_4(sc->sc_tag, sc->sc_handle,
			    sc->sc_wrofs + i, *(ptr.x32)++);
			break;
		}

		intr_restore(intr);

		error = cfi_wait_ready(sc, sc->sc_wrofs, sc->sc_write_timeout);
		if (error)
			goto out;
	}

	/* error is 0. */

 out:
	cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
	return (error);
}

#ifdef CFI_SUPPORT_STRATAFLASH
/*
 * Intel StrataFlash Protection Register Support.
 *
 * The memory includes a 128-bit Protection Register that can be
 * used for security.  There are two 64-bit segments; one is programmed
 * at the factory with a unique 64-bit number which is immutable.
 * The other segment is left blank for User (OEM) programming.
 * The User/OEM segment is One Time Programmable (OTP).  It can also
 * be locked to prevent any further writes by clearing bit 1 of the
 * Protection Lock Register (PLR).  The PLR can be written only once.
 */

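/*
 * The protection registers are always accessed as 16-bit quantities;
 * these helpers convert a word index into the byte offset expected by
 * bus_space.
 */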
static uint16_t
cfi_get16(struct cfi_softc *sc, int off)
{
	uint16_t v = bus_space_read_2(sc->sc_tag, sc->sc_handle, off<<1);
	return v;
}

#ifdef CFI_ARMEDANDDANGEROUS
static void
cfi_put16(struct cfi_softc *sc, int off, uint16_t v)
{
	bus_space_write_2(sc->sc_tag, sc->sc_handle, off<<1, v);
}
#endif

/*
 * Read the factory-defined 64-bit segment of the PR.
 */
int
cfi_intel_get_factory_pr(struct cfi_softc *sc, uint64_t *id)
{
	if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
		return EOPNOTSUPP;
	KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));

	cfi_write(sc, 0, CFI_INTEL_READ_ID);
	*id = ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(0)))<<48 |
	      ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(1)))<<32 |
	      ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(2)))<<16 |
	      ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(3)));
	cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
	return 0;
}

/*
 * Read the User/OEM 64-bit segment of the PR.
 */
int
cfi_intel_get_oem_pr(struct cfi_softc *sc, uint64_t *id)
{
	if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
		return EOPNOTSUPP;
	KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));

	cfi_write(sc, 0, CFI_INTEL_READ_ID);
	*id = ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(4)))<<48 |
	      ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(5)))<<32 |
	      ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(6)))<<16 |
	      ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(7)));
	cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
	return 0;
}

/*
 * Write the User/OEM 64-bit segment of the PR.
 * XXX should allow writing individual words/bytes
 */
int
cfi_intel_set_oem_pr(struct cfi_softc *sc, uint64_t id)
{
#ifdef CFI_ARMEDANDDANGEROUS
	register_t intr;
	int i, error;
#endif

	if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
		return EOPNOTSUPP;
	KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));

#ifdef CFI_ARMEDANDDANGEROUS
	for (i = 7; i >= 4; i--, id >>= 16) {
		intr = intr_disable();
		cfi_write(sc, 0, CFI_INTEL_PP_SETUP);
		cfi_put16(sc, CFI_INTEL_PR(i), id&0xffff);
		intr_restore(intr);
		error = cfi_wait_ready(sc, CFI_BCS_READ_STATUS,
		    sc->sc_write_timeout);
		if (error)
			break;
	}
	cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
	return error;
#else
	device_printf(sc->sc_dev, "%s: OEM PR not set, "
	    "CFI_ARMEDANDDANGEROUS not configured\n", __func__);
	return ENXIO;
#endif
}

/*
 * Read the contents of the Protection Lock Register.
 */
int
cfi_intel_get_plr(struct cfi_softc *sc, uint32_t *plr)
{
	if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
		return EOPNOTSUPP;
	KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));

	cfi_write(sc, 0, CFI_INTEL_READ_ID);
	*plr = cfi_get16(sc, CFI_INTEL_PLR);
	cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
	return 0;
}

/*
 * Write the Protection Lock Register to lock down the
 * user-settable segment of the Protection Register.
 * NOTE: this operation is not reversible.
 */
int
cfi_intel_set_plr(struct cfi_softc *sc)
{
#ifdef CFI_ARMEDANDDANGEROUS
	register_t intr;
	int error;
#endif
	if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
		return EOPNOTSUPP;
	KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));

#ifdef CFI_ARMEDANDDANGEROUS
	/* worthy of console msg */
	device_printf(sc->sc_dev, "set PLR\n");
	intr = intr_disable();
	cfi_write(sc, 0, CFI_INTEL_PP_SETUP);
	cfi_put16(sc, CFI_INTEL_PLR, 0xFFFD);
	intr_restore(intr);
	error = cfi_wait_ready(sc, CFI_BCS_READ_STATUS, sc->sc_write_timeout);
	cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
	return error;
#else
	device_printf(sc->sc_dev, "%s: PLR not set, "
	    "CFI_ARMEDANDDANGEROUS not configured\n", __func__);
	return ENXIO;
#endif
}
#endif /* CFI_SUPPORT_STRATAFLASH */