/* xref: /freebsd/sys/dev/cfi/cfi_core.c (revision 39beb93c3f8bdbf72a61fda42300b5ebed7390c8) */
1 /*-
2  * Copyright (c) 2007, Juniper Networks, Inc.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. Neither the name of the author nor the names of any co-contributors
14  *    may be used to endorse or promote products derived from this software
15  *    without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
22  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
23  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
24  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
25  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  */
29 
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32 
33 #include "opt_cfi.h"
34 
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/bus.h>
38 #include <sys/conf.h>
39 #include <sys/kernel.h>
40 #include <sys/malloc.h>
41 #include <sys/module.h>
42 #include <sys/rman.h>
43 #include <sys/sysctl.h>
44 
45 #include <machine/bus.h>
46 
47 #include <dev/cfi/cfi_reg.h>
48 #include <dev/cfi/cfi_var.h>
49 
/* Character-device entry points; defined elsewhere (presumably cfi_dev.c). */
extern struct cdevsw cfi_cdevsw;

/* Driver name; also used to build the /dev node name in cfi_attach(). */
char cfi_driver_name[] = "cfi";
devclass_t cfi_devclass;
54 
55 uint32_t
56 cfi_read(struct cfi_softc *sc, u_int ofs)
57 {
58 	uint32_t val;
59 
60 	ofs &= ~(sc->sc_width - 1);
61 	switch (sc->sc_width) {
62 	case 1:
63 		val = bus_space_read_1(sc->sc_tag, sc->sc_handle, ofs);
64 		break;
65 	case 2:
66 		val = bus_space_read_2(sc->sc_tag, sc->sc_handle, ofs);
67 		break;
68 	case 4:
69 		val = bus_space_read_4(sc->sc_tag, sc->sc_handle, ofs);
70 		break;
71 	default:
72 		val = ~0;
73 		break;
74 	}
75 	return (val);
76 }
77 
78 static void
79 cfi_write(struct cfi_softc *sc, u_int ofs, u_int val)
80 {
81 
82 	ofs &= ~(sc->sc_width - 1);
83 	switch (sc->sc_width) {
84 	case 1:
85 		bus_space_write_1(sc->sc_tag, sc->sc_handle, ofs, val);
86 		break;
87 	case 2:
88 		bus_space_write_2(sc->sc_tag, sc->sc_handle, ofs, val);
89 		break;
90 	case 4:
91 		bus_space_write_4(sc->sc_tag, sc->sc_handle, ofs, val);
92 		break;
93 	}
94 }
95 
96 uint8_t
97 cfi_read_qry(struct cfi_softc *sc, u_int ofs)
98 {
99 	uint8_t val;
100 
101 	cfi_write(sc, CFI_QRY_CMD_ADDR * sc->sc_width, CFI_QRY_CMD_DATA);
102 	val = cfi_read(sc, ofs * sc->sc_width);
103 	cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
104 	return (val);
105 }
106 
/*
 * Issue an AMD-style command: the two-cycle unlock sequence followed
 * by the command write itself.  'ofs' is the base (e.g. sector)
 * offset, 'addr' the command address within it, 'data' the command.
 * The three writes must stay in exactly this order.
 */
static void
cfi_amd_write(struct cfi_softc *sc, u_int ofs, u_int addr, u_int data)
{

	cfi_write(sc, ofs + AMD_ADDR_START, CFI_AMD_UNLOCK);
	cfi_write(sc, ofs + AMD_ADDR_ACK, CFI_AMD_UNLOCK_ACK);
	cfi_write(sc, ofs + addr, data);
}
115 
/*
 * Format a byte count into a human-readable string such as "512KB"
 * or "4MB", scaling by 1024 while a suffix remains.
 *
 * NOTE: returns a pointer to a static buffer, so the result is only
 * valid until the next call and the function is not reentrant.
 */
static char *
cfi_fmtsize(uint32_t sz)
{
	static char buf[8];
	static const char *sfx[] = { "", "K", "M", "G" };
	int sfxidx;

	sfxidx = 0;
	while (sfxidx < 3 && sz > 1023) {
		sz /= 1024;
		sfxidx++;
	}

	/* snprintf: never overrun the static buffer, even if the
	 * scaling invariant above is ever broken. */
	snprintf(buf, sizeof(buf), "%u%sB", sz, sfx[sfxidx]);
	return (buf);
}
132 
/*
 * Probe for a CFI-compliant flash device: detect the port width,
 * verify the "QRY" signature, record vendor/command-set and size,
 * and sanity-check the interface width.  The memory resource is
 * released again before returning, success or failure.
 */
int
cfi_probe(device_t dev)
{
	char desc[80];
	struct cfi_softc *sc;
	char *vend_str;
	int error;
	uint16_t iface, vend;

	sc = device_get_softc(dev);
	sc->sc_dev = dev;

	/* Map the flash window for the duration of the probe. */
	sc->sc_rid = 0;
	sc->sc_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rid,
	    RF_ACTIVE);
	if (sc->sc_res == NULL)
		return (ENXIO);

	sc->sc_tag = rman_get_bustag(sc->sc_res);
	sc->sc_handle = rman_get_bushandle(sc->sc_res);

	/*
	 * If the bus front-end did not fix the port width, autodetect
	 * it by issuing the query command at widths 1, 2 and 4 until
	 * the 'Q' of the CFI signature answers.
	 */
	if (sc->sc_width == 0) {
		sc->sc_width = 1;
		while (sc->sc_width <= 4) {
			if (cfi_read_qry(sc, CFI_QRY_IDENT) == 'Q')
				break;
			sc->sc_width <<= 1;
		}
	} else if (cfi_read_qry(sc, CFI_QRY_IDENT) != 'Q') {
		error = ENXIO;
		goto out;
	}
	/* Autodetection exhausted all widths: not a CFI device. */
	if (sc->sc_width > 4) {
		error = ENXIO;
		goto out;
	}

	/* We got a Q. Check if we also have the R and the Y. */
	if (cfi_read_qry(sc, CFI_QRY_IDENT + 1) != 'R' ||
	    cfi_read_qry(sc, CFI_QRY_IDENT + 2) != 'Y') {
		error = ENXIO;
		goto out;
	}

	/* Get the vendor and command set (two query bytes, LE). */
	vend = cfi_read_qry(sc, CFI_QRY_VEND) |
	    (cfi_read_qry(sc, CFI_QRY_VEND + 1) << 8);

	sc->sc_cmdset = vend;

	switch (vend) {
	case CFI_VEND_AMD_ECS:
	case CFI_VEND_AMD_SCS:
		vend_str = "AMD/Fujitsu";
		break;
	case CFI_VEND_INTEL_ECS:
		vend_str = "Intel/Sharp";
		break;
	case CFI_VEND_INTEL_SCS:
		vend_str = "Intel";
		break;
	case CFI_VEND_MITSUBISHI_ECS:
	case CFI_VEND_MITSUBISHI_SCS:
		vend_str = "Mitsubishi";
		break;
	default:
		vend_str = "Unknown vendor";
		break;
	}

	/* Get the device size; the query byte holds log2(bytes). */
	sc->sc_size = 1U << cfi_read_qry(sc, CFI_QRY_SIZE);

	/* Sanity-check the I/F */
	iface = cfi_read_qry(sc, CFI_QRY_IFACE) |
	    (cfi_read_qry(sc, CFI_QRY_IFACE + 1) << 8);

	/*
	 * Adding 1 to iface will give us a bit-wise "switch"
	 * that allows us to test for the interface width by
	 * testing a single bit.
	 */
	iface++;

	error = (iface & sc->sc_width) ? 0 : EINVAL;
	if (error)
		goto out;

	snprintf(desc, sizeof(desc), "%s - %s", vend_str,
	    cfi_fmtsize(sc->sc_size));
	device_set_desc_copy(dev, desc);

 out:
	/* A probe must not hold resources; attach re-acquires them. */
	bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rid, sc->sc_res);
	return (error);
}
229 
/*
 * Attach the device: map the flash window, read timeouts and the
 * erase-region geometry from the CFI query table, and create the
 * /dev/cfi<unit> character device node.
 */
int
cfi_attach(device_t dev)
{
	struct cfi_softc *sc;
	u_int blksz, blocks;
	u_int r, u;

	sc = device_get_softc(dev);
	sc->sc_dev = dev;

	sc->sc_rid = 0;
	sc->sc_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rid,
	    RF_ACTIVE);
	if (sc->sc_res == NULL)
		return (ENXIO);

	sc->sc_tag = rman_get_bustag(sc->sc_res);
	sc->sc_handle = rman_get_bushandle(sc->sc_res);

	/*
	 * Get time-out values for erase and write.  The query bytes
	 * hold log2 of the typical time and log2 of the maximum
	 * multiplier, so the products below are the worst-case
	 * timeouts (units per the CFI spec -- TODO confirm whether
	 * cfi_wait_ready() treats these as milliseconds).
	 */
	sc->sc_write_timeout = 1 << cfi_read_qry(sc, CFI_QRY_TTO_WRITE);
	sc->sc_erase_timeout = 1 << cfi_read_qry(sc, CFI_QRY_TTO_ERASE);
	sc->sc_write_timeout *= 1 << cfi_read_qry(sc, CFI_QRY_MTO_WRITE);
	sc->sc_erase_timeout *= 1 << cfi_read_qry(sc, CFI_QRY_MTO_ERASE);

	/* Get erase regions. */
	sc->sc_regions = cfi_read_qry(sc, CFI_QRY_NREGIONS);
	sc->sc_region = malloc(sc->sc_regions * sizeof(struct cfi_region),
	    M_TEMP, M_WAITOK | M_ZERO);
	for (r = 0; r < sc->sc_regions; r++) {
		/* Query field holds (block count - 1) in two LE bytes. */
		blocks = cfi_read_qry(sc, CFI_QRY_REGION(r)) |
		    (cfi_read_qry(sc, CFI_QRY_REGION(r) + 1) << 8);
		sc->sc_region[r].r_blocks = blocks + 1;

		/* Block size is in 256-byte units; 0 means 128 bytes. */
		blksz = cfi_read_qry(sc, CFI_QRY_REGION(r) + 2) |
		    (cfi_read_qry(sc, CFI_QRY_REGION(r) + 3) << 8);
		sc->sc_region[r].r_blksz = (blksz == 0) ? 128 :
		    blksz * 256;
	}

	/* Reset the device to a default state. */
	cfi_write(sc, 0, CFI_BCS_CLEAR_STATUS);

	if (bootverbose) {
		device_printf(dev, "[");
		for (r = 0; r < sc->sc_regions; r++) {
			printf("%ux%s%s", sc->sc_region[r].r_blocks,
			    cfi_fmtsize(sc->sc_region[r].r_blksz),
			    (r == sc->sc_regions - 1) ? "]\n" : ",");
		}
	}

	/* Create /dev/cfi<unit> for user-space access. */
	u = device_get_unit(dev);
	sc->sc_nod = make_dev(&cfi_cdevsw, u, UID_ROOT, GID_WHEEL, 0600,
	    "%s%u", cfi_driver_name, u);
	sc->sc_nod->si_drv1 = sc;

	return (0);
}
289 
290 int
291 cfi_detach(device_t dev)
292 {
293 	struct cfi_softc *sc;
294 
295 	sc = device_get_softc(dev);
296 
297 	destroy_dev(sc->sc_nod);
298 	free(sc->sc_region, M_TEMP);
299 	bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rid, sc->sc_res);
300 	return (0);
301 }
302 
/*
 * Poll the device until the pending erase/program operation completes
 * or the timeout expires.  Returns 0 on success or an errno decoded
 * from the status bits.  The timeout appears to be in milliseconds:
 * each unit is polled 10 times with a 100us delay -- TODO confirm
 * units against the CFI query timeout fields read in cfi_attach().
 */
static int
cfi_wait_ready(struct cfi_softc *sc, u_int ofs, u_int timeout)
{
	int done, error;
	uint32_t st0 = 0, st = 0;

	done = 0;
	error = 0;
	timeout *= 10;
	while (!done && !error && timeout) {
		DELAY(100);
		timeout--;

		switch (sc->sc_cmdset) {
		case CFI_VEND_INTEL_ECS:
		case CFI_VEND_INTEL_SCS:
			/* Intel: WSMS set means the state machine is idle. */
			st = cfi_read(sc, ofs);
			done = (st & CFI_INTEL_STATUS_WSMS);
			if (done) {
				/* NB: bit 0 is reserved */
				st &= ~(CFI_INTEL_XSTATUS_RSVD |
					CFI_INTEL_STATUS_WSMS |
					CFI_INTEL_STATUS_RSVD);
				/* Map remaining error bits to errnos. */
				if (st & CFI_INTEL_STATUS_DPS)
					error = EPERM;
				else if (st & CFI_INTEL_STATUS_PSLBS)
					error = EIO;
				else if (st & CFI_INTEL_STATUS_ECLBS)
					error = ENXIO;
				else if (st)
					error = EACCES;
			}
			break;
		case CFI_VEND_AMD_SCS:
		case CFI_VEND_AMD_ECS:
			/*
			 * AMD: bit 0x40 toggles on successive reads while
			 * an operation is in progress (toggle-bit
			 * algorithm); two equal reads mean it is done.
			 */
			st0 = cfi_read(sc, ofs);
			st = cfi_read(sc, ofs);
			done = ((st & 0x40) == (st0 & 0x40)) ? 1 : 0;
			break;
		}
	}
	if (!done && !error)
		error = ETIMEDOUT;
	if (error)
		printf("\nerror=%d (st 0x%x st0 0x%x)\n", error, st, st0);
	return (error);
}
350 
/*
 * Erase the block at sc_wrofs and program it from the staged data in
 * sc_wrbuf (sc_wrbufsz bytes).  Returns 0 or an errno; in all cases
 * the device is returned to read-array mode before returning.
 */
int
cfi_write_block(struct cfi_softc *sc)
{
	/* View of the write buffer at the device's port width. */
	union {
		uint8_t		*x8;
		uint16_t	*x16;
		uint32_t	*x32;
	} ptr;
	register_t intr;
	int error, i;

	/* Erase the block. */
	switch (sc->sc_cmdset) {
	case CFI_VEND_INTEL_ECS:
	case CFI_VEND_INTEL_SCS:
		cfi_write(sc, sc->sc_wrofs, CFI_BCS_BLOCK_ERASE);
		cfi_write(sc, sc->sc_wrofs, CFI_BCS_CONFIRM);
		break;
	case CFI_VEND_AMD_SCS:
	case CFI_VEND_AMD_ECS:
		cfi_amd_write(sc, sc->sc_wrofs, AMD_ADDR_START,
		    CFI_AMD_ERASE_SECTOR);
		cfi_amd_write(sc, sc->sc_wrofs, 0, CFI_AMD_BLOCK_ERASE);
		break;
	default:
		/* Better safe than sorry... */
		return (ENODEV);
	}
	error = cfi_wait_ready(sc, sc->sc_wrofs, sc->sc_erase_timeout);
	if (error)
		goto out;

	/* Write the block, one port-width datum at a time. */
	ptr.x8 = sc->sc_wrbuf;
	for (i = 0; i < sc->sc_wrbufsz; i += sc->sc_width) {

		/*
		 * Make sure the command to start a write and the
		 * actual write happens back-to-back without any
		 * excessive delays.
		 */
		intr = intr_disable();

		switch (sc->sc_cmdset) {
		case CFI_VEND_INTEL_ECS:
		case CFI_VEND_INTEL_SCS:
			cfi_write(sc, sc->sc_wrofs + i, CFI_BCS_PROGRAM);
			break;
		case CFI_VEND_AMD_SCS:
		case CFI_VEND_AMD_ECS:
			cfi_amd_write(sc, 0, AMD_ADDR_START, CFI_AMD_PROGRAM);
			break;
		}
		/* Write the datum at the device's port width. */
		switch (sc->sc_width) {
		case 1:
			bus_space_write_1(sc->sc_tag, sc->sc_handle,
			    sc->sc_wrofs + i, *(ptr.x8)++);
			break;
		case 2:
			bus_space_write_2(sc->sc_tag, sc->sc_handle,
			    sc->sc_wrofs + i, *(ptr.x16)++);
			break;
		case 4:
			bus_space_write_4(sc->sc_tag, sc->sc_handle,
			    sc->sc_wrofs + i, *(ptr.x32)++);
			break;
		}

		intr_restore(intr);

		error = cfi_wait_ready(sc, sc->sc_wrofs, sc->sc_write_timeout);
		if (error)
			goto out;
	}

	/* error is 0. */

 out:
	cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
	return (error);
}
432 
433 #ifdef CFI_SUPPORT_STRATAFLASH
434 /*
435  * Intel StrataFlash Protection Register Support.
436  *
437  * The memory includes a 128-bit Protection Register that can be
438  * used for security.  There are two 64-bit segments; one is programmed
439  * at the factory with a unique 64-bit number which is immutable.
440  * The other segment is left blank for User (OEM) programming.
441  * The User/OEM segment is One Time Programmable (OTP).  It can also
442  * be locked to prevent any further writes by setting bit 0 of the
443  * Protection Lock Register (PLR).  The PLR can written only once.
444  */
445 
446 static uint16_t
447 cfi_get16(struct cfi_softc *sc, int off)
448 {
449 	uint16_t v = bus_space_read_2(sc->sc_tag, sc->sc_handle, off<<1);
450 	return v;
451 }
452 
#ifdef CFI_ARMEDANDDANGEROUS
/*
 * Write a 16-bit word to the device.  'off' is a word offset, hence
 * the shift.  Only compiled in when irreversible writes are enabled.
 */
static void
cfi_put16(struct cfi_softc *sc, int off, uint16_t v)
{
	bus_space_write_2(sc->sc_tag, sc->sc_handle, off<<1, v);
}
#endif
460 
461 /*
462  * Read the factory-defined 64-bit segment of the PR.
463  */
464 int
465 cfi_intel_get_factory_pr(struct cfi_softc *sc, uint64_t *id)
466 {
467 	if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
468 		return EOPNOTSUPP;
469 	KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));
470 
471 	cfi_write(sc, 0, CFI_INTEL_READ_ID);
472 	*id = ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(0)))<<48 |
473 	      ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(1)))<<32 |
474 	      ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(2)))<<16 |
475 	      ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(3)));
476 	cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
477 	return 0;
478 }
479 
480 /*
481  * Read the User/OEM 64-bit segment of the PR.
482  */
483 int
484 cfi_intel_get_oem_pr(struct cfi_softc *sc, uint64_t *id)
485 {
486 	if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
487 		return EOPNOTSUPP;
488 	KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));
489 
490 	cfi_write(sc, 0, CFI_INTEL_READ_ID);
491 	*id = ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(4)))<<48 |
492 	      ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(5)))<<32 |
493 	      ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(6)))<<16 |
494 	      ((uint64_t)cfi_get16(sc, CFI_INTEL_PR(7)));
495 	cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
496 	return 0;
497 }
498 
/*
 * Write the User/OEM 64-bit segment of the PR.  This segment is
 * one-time-programmable, so the write is only compiled in when
 * CFI_ARMEDANDDANGEROUS is configured.
 * XXX should allow writing individual words/bytes
 */
int
cfi_intel_set_oem_pr(struct cfi_softc *sc, uint64_t id)
{
#ifdef CFI_ARMEDANDDANGEROUS
	register_t intr;
	int i, error;
#endif

	if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
		return EOPNOTSUPP;
	KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));

#ifdef CFI_ARMEDANDDANGEROUS
	/*
	 * Program words 7 down to 4, low 16 bits of 'id' going into
	 * word 7.  Interrupts are disabled so the program-setup
	 * command and the data write stay back-to-back.
	 */
	for (i = 7; i >= 4; i--, id >>= 16) {
		intr = intr_disable();
		cfi_write(sc, 0, CFI_INTEL_PP_SETUP);
		cfi_put16(sc, CFI_INTEL_PR(i), id&0xffff);
		intr_restore(intr);
		error = cfi_wait_ready(sc, CFI_BCS_READ_STATUS,
		    sc->sc_write_timeout);
		if (error)
			break;
	}
	cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
	return error;
#else
	/* Refuse: writes here are irreversible without the option. */
	device_printf(sc->sc_dev, "%s: OEM PR not set, "
	    "CFI_ARMEDANDDANGEROUS not configured\n", __func__);
	return ENXIO;
#endif
}
534 
/*
 * Read the contents of the Protection Lock Register.
 */
int
cfi_intel_get_plr(struct cfi_softc *sc, uint32_t *plr)
{
	if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
		return EOPNOTSUPP;
	KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));

	/* Enter ID/PR read mode, fetch, and return to array mode. */
	cfi_write(sc, 0, CFI_INTEL_READ_ID);
	*plr = cfi_get16(sc, CFI_INTEL_PLR);
	cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
	return 0;
}
550 
/*
 * Write the Protection Lock Register to lock down the
 * user-settable segment of the Protection Register.
 * NOTE: this operation is not reversible.
 */
int
cfi_intel_set_plr(struct cfi_softc *sc)
{
#ifdef CFI_ARMEDANDDANGEROUS
	register_t intr;
	int error;
#endif
	if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
		return EOPNOTSUPP;
	KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));

#ifdef CFI_ARMEDANDDANGEROUS
	/* worthy of console msg */
	device_printf(sc->sc_dev, "set PLR\n");
	/* Keep the setup command and the lock write back-to-back. */
	intr = intr_disable();
	cfi_write(sc, 0, CFI_INTEL_PP_SETUP);
	cfi_put16(sc, CFI_INTEL_PLR, 0xFFFD);
	intr_restore(intr);
	error = cfi_wait_ready(sc, CFI_BCS_READ_STATUS, sc->sc_write_timeout);
	cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
	return error;
#else
	/* Refuse: this write is irreversible without the option. */
	device_printf(sc->sc_dev, "%s: PLR not set, "
	    "CFI_ARMEDANDDANGEROUS not configured\n", __func__);
	return ENXIO;
#endif
}
583 #endif /* CFI_SUPPORT_STRATAFLASH */
584