xref: /linux/fs/pstore/ram_core.c (revision 5ff328836dfde0cef9f28c8b8791a90a36d7a183)
/*
 * Copyright (C) 2012 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/pstore_ram.h>
#include <linux/rslib.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <asm/page.h>

/**
 * struct persistent_ram_buffer - persistent circular RAM buffer
 *
 * @sig:
 *	signature to indicate header (PERSISTENT_RAM_SIG xor PRZ-type value)
 * @start:
 *	offset into @data where the oldest stored bytes begin
 * @size:
 *	number of valid bytes stored in @data
 * @data:
 *	the stored bytes themselves, used as a circular buffer
 */
struct persistent_ram_buffer {
	uint32_t    sig;
	atomic_t    start;
	atomic_t    size;
	uint8_t     data[0];
};

#define PERSISTENT_RAM_SIG (0x43474244) /* DBGC */

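/*
 * Layout sketch of a zone's memory (derived from
 * persistent_ram_buffer_map() and persistent_ram_init_ecc() below; the
 * two parity regions exist only when ECC is enabled):
 *
 *   [ struct persistent_ram_buffer | data ... | block parity | header parity ]
 *
 * After ECC setup, prz->buffer_size covers only the "data" portion.
 */
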
static inline size_t buffer_size(struct persistent_ram_zone *prz)
{
	return atomic_read(&prz->buffer->size);
}

static inline size_t buffer_start(struct persistent_ram_zone *prz)
{
	return atomic_read(&prz->buffer->start);
}

/* increase and wrap the start pointer, returning the old value */
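/*
 * Worked example (hypothetical values): with buffer_size = 1024, an old
 * start of 1000 and a = 100, the new start is 1000 + 100 - 1024 = 76;
 * the returned old value (1000) is where the caller begins copying, and
 * the copy wraps at the end of the data area.
 */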
static size_t buffer_start_add(struct persistent_ram_zone *prz, size_t a)
{
	int old;
	int new;
	unsigned long flags = 0;

	if (!(prz->flags & PRZ_FLAG_NO_LOCK))
		raw_spin_lock_irqsave(&prz->buffer_lock, flags);

	old = atomic_read(&prz->buffer->start);
	new = old + a;
	while (unlikely(new >= prz->buffer_size))
		new -= prz->buffer_size;
	atomic_set(&prz->buffer->start, new);

	if (!(prz->flags & PRZ_FLAG_NO_LOCK))
		raw_spin_unlock_irqrestore(&prz->buffer_lock, flags);

	return old;
}

/* increase the size counter until it hits the max size */
static void buffer_size_add(struct persistent_ram_zone *prz, size_t a)
{
	size_t old;
	size_t new;
	unsigned long flags = 0;

	if (!(prz->flags & PRZ_FLAG_NO_LOCK))
		raw_spin_lock_irqsave(&prz->buffer_lock, flags);

	old = atomic_read(&prz->buffer->size);
	if (old == prz->buffer_size)
		goto exit;

	new = old + a;
	if (new > prz->buffer_size)
		new = prz->buffer_size;
	atomic_set(&prz->buffer->size, new);

exit:
	if (!(prz->flags & PRZ_FLAG_NO_LOCK))
		raw_spin_unlock_irqrestore(&prz->buffer_lock, flags);
}

static void notrace persistent_ram_encode_rs8(struct persistent_ram_zone *prz,
	uint8_t *data, size_t len, uint8_t *ecc)
{
	int i;

	/* Initialize the parity buffer */
	memset(prz->ecc_info.par, 0,
	       prz->ecc_info.ecc_size * sizeof(prz->ecc_info.par[0]));
	encode_rs8(prz->rs_decoder, data, len, prz->ecc_info.par, 0);
	for (i = 0; i < prz->ecc_info.ecc_size; i++)
		ecc[i] = prz->ecc_info.par[i];
}

static int persistent_ram_decode_rs8(struct persistent_ram_zone *prz,
	void *data, size_t len, uint8_t *ecc)
{
	int i;

	for (i = 0; i < prz->ecc_info.ecc_size; i++)
		prz->ecc_info.par[i] = ecc[i];
	return decode_rs8(prz->rs_decoder, data, prz->ecc_info.par, len,
				NULL, 0, NULL, 0, NULL);
}

static void notrace persistent_ram_update_ecc(struct persistent_ram_zone *prz,
	unsigned int start, unsigned int count)
{
	struct persistent_ram_buffer *buffer = prz->buffer;
	uint8_t *buffer_end = buffer->data + prz->buffer_size;
	uint8_t *block;
	uint8_t *par;
	int ecc_block_size = prz->ecc_info.block_size;
	int ecc_size = prz->ecc_info.ecc_size;
	int size = ecc_block_size;

	if (!ecc_size)
		return;

	block = buffer->data + (start & ~(ecc_block_size - 1));
	par = prz->par_buffer + (start / ecc_block_size) * ecc_size;

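	/*
	 * Example (hypothetical numbers): with the default 128-byte blocks
	 * and 16 ECC bytes per block, a write at start = 300 re-encodes
	 * from block = data + 256 (300 & ~127) with par = par_buffer + 2 * 16
	 * (block index 300 / 128). The mask form assumes ecc_block_size is
	 * a power of two.
	 */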
	do {
		if (block + ecc_block_size > buffer_end)
			size = buffer_end - block;
		persistent_ram_encode_rs8(prz, block, size, par);
		block += ecc_block_size;
		par += ecc_size;
	} while (block < buffer->data + start + count);
}

static void persistent_ram_update_header_ecc(struct persistent_ram_zone *prz)
{
	struct persistent_ram_buffer *buffer = prz->buffer;

	if (!prz->ecc_info.ecc_size)
		return;

	persistent_ram_encode_rs8(prz, (uint8_t *)buffer, sizeof(*buffer),
				  prz->par_header);
}

static void persistent_ram_ecc_old(struct persistent_ram_zone *prz)
{
	struct persistent_ram_buffer *buffer = prz->buffer;
	uint8_t *block;
	uint8_t *par;

	if (!prz->ecc_info.ecc_size)
		return;

	block = buffer->data;
	par = prz->par_buffer;
	while (block < buffer->data + buffer_size(prz)) {
		int numerr;
		int size = prz->ecc_info.block_size;
		if (block + size > buffer->data + prz->buffer_size)
			size = buffer->data + prz->buffer_size - block;
		numerr = persistent_ram_decode_rs8(prz, block, size, par);
		if (numerr > 0) {
			pr_devel("error in block %p, %d\n", block, numerr);
			prz->corrected_bytes += numerr;
		} else if (numerr < 0) {
			pr_devel("uncorrectable error in block %p\n", block);
			prz->bad_blocks++;
		}
		block += prz->ecc_info.block_size;
		par += prz->ecc_info.ecc_size;
	}
}

static int persistent_ram_init_ecc(struct persistent_ram_zone *prz,
				   struct persistent_ram_ecc_info *ecc_info)
{
	int numerr;
	struct persistent_ram_buffer *buffer = prz->buffer;
	int ecc_blocks;
	size_t ecc_total;

	if (!ecc_info || !ecc_info->ecc_size)
		return 0;

	prz->ecc_info.block_size = ecc_info->block_size ?: 128;
	prz->ecc_info.ecc_size = ecc_info->ecc_size ?: 16;
	prz->ecc_info.symsize = ecc_info->symsize ?: 8;
	prz->ecc_info.poly = ecc_info->poly ?: 0x11d;

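	/*
	 * Sizing sketch (hypothetical numbers): for a 4096-byte data area
	 * with the defaults above, ecc_blocks = DIV_ROUND_UP(4096 - 16,
	 * 128 + 16) = 29 and ecc_total = (29 + 1) * 16 = 480; the extra
	 * parity slot covers the header, leaving 3616 bytes for data.
	 */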
	ecc_blocks = DIV_ROUND_UP(prz->buffer_size - prz->ecc_info.ecc_size,
				  prz->ecc_info.block_size +
				  prz->ecc_info.ecc_size);
	ecc_total = (ecc_blocks + 1) * prz->ecc_info.ecc_size;
	if (ecc_total >= prz->buffer_size) {
		pr_err("%s: invalid ecc_size %u (total %zu, buffer size %zu)\n",
		       __func__, prz->ecc_info.ecc_size,
		       ecc_total, prz->buffer_size);
		return -EINVAL;
	}

	prz->buffer_size -= ecc_total;
	prz->par_buffer = buffer->data + prz->buffer_size;
	prz->par_header = prz->par_buffer +
			  ecc_blocks * prz->ecc_info.ecc_size;

	/*
	 * first consecutive root is 0
	 * primitive element to generate roots = 1
	 */
	prz->rs_decoder = init_rs(prz->ecc_info.symsize, prz->ecc_info.poly,
				  0, 1, prz->ecc_info.ecc_size);
	if (prz->rs_decoder == NULL) {
		pr_info("init_rs failed\n");
		return -EINVAL;
	}

	/* allocate workspace instead of using stack VLA */
	prz->ecc_info.par = kmalloc_array(prz->ecc_info.ecc_size,
					  sizeof(*prz->ecc_info.par),
					  GFP_KERNEL);
	if (!prz->ecc_info.par) {
		pr_err("cannot allocate ECC parity workspace\n");
		return -ENOMEM;
	}

	prz->corrected_bytes = 0;
	prz->bad_blocks = 0;

	numerr = persistent_ram_decode_rs8(prz, buffer, sizeof(*buffer),
					   prz->par_header);
	if (numerr > 0) {
		pr_info("error in header, %d\n", numerr);
		prz->corrected_bytes += numerr;
	} else if (numerr < 0) {
		pr_info("uncorrectable error in header\n");
		prz->bad_blocks++;
	}

	return 0;
}

ssize_t persistent_ram_ecc_string(struct persistent_ram_zone *prz,
	char *str, size_t len)
{
	ssize_t ret;

	if (!prz->ecc_info.ecc_size)
		return 0;

	if (prz->corrected_bytes || prz->bad_blocks)
		ret = snprintf(str, len,
			"\n%d Corrected bytes, %d unrecoverable blocks\n",
			prz->corrected_bytes, prz->bad_blocks);
	else
		ret = snprintf(str, len, "\nNo errors detected\n");

	return ret;
}

static void notrace persistent_ram_update(struct persistent_ram_zone *prz,
	const void *s, unsigned int start, unsigned int count)
{
	struct persistent_ram_buffer *buffer = prz->buffer;
	memcpy_toio(buffer->data + start, s, count);
	persistent_ram_update_ecc(prz, start, count);
}

static int notrace persistent_ram_update_user(struct persistent_ram_zone *prz,
	const void __user *s, unsigned int start, unsigned int count)
{
	struct persistent_ram_buffer *buffer = prz->buffer;
	int ret = unlikely(__copy_from_user(buffer->data + start, s, count)) ?
		-EFAULT : 0;
	persistent_ram_update_ecc(prz, start, count);
	return ret;
}

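/*
 * Linearize the circular buffer into a freshly allocated prz->old_log,
 * oldest bytes first: the tail of @data (from @start onward) is copied
 * before the head (up to @start). When the ring never wrapped,
 * start == size and the first copy moves zero bytes.
 */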
void persistent_ram_save_old(struct persistent_ram_zone *prz)
{
	struct persistent_ram_buffer *buffer = prz->buffer;
	size_t size = buffer_size(prz);
	size_t start = buffer_start(prz);

	if (!size)
		return;

	if (!prz->old_log) {
		persistent_ram_ecc_old(prz);
		prz->old_log = kmalloc(size, GFP_KERNEL);
	}
	if (!prz->old_log) {
		pr_err("failed to allocate buffer\n");
		return;
	}

	prz->old_log_size = size;
	memcpy_fromio(prz->old_log, &buffer->data[start], size - start);
	memcpy_fromio(prz->old_log + size - start, &buffer->data[0], start);
}

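/*
 * Append @count bytes to the ring, wrapping at the end of the data area
 * and re-encoding the ECC for every block touched. If @count exceeds the
 * buffer size, only the last prz->buffer_size bytes of @s are kept.
 * Always returns @count.
 */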
int notrace persistent_ram_write(struct persistent_ram_zone *prz,
	const void *s, unsigned int count)
{
	int rem;
	int c = count;
	size_t start;

	if (unlikely(c > prz->buffer_size)) {
		s += c - prz->buffer_size;
		c = prz->buffer_size;
	}

	buffer_size_add(prz, c);

	start = buffer_start_add(prz, c);

	rem = prz->buffer_size - start;
	if (unlikely(rem < c)) {
		persistent_ram_update(prz, s, start, rem);
		s += rem;
		c -= rem;
		start = 0;
	}
	persistent_ram_update(prz, s, start, c);

	persistent_ram_update_header_ecc(prz);

	return count;
}

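/*
 * Same as persistent_ram_write(), but @s is a userspace pointer; returns
 * -EFAULT if the initial access_ok() check fails or a copy faults.
 */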
int notrace persistent_ram_write_user(struct persistent_ram_zone *prz,
	const void __user *s, unsigned int count)
{
	int rem, ret = 0, c = count;
	size_t start;

	if (unlikely(!access_ok(s, count)))
		return -EFAULT;
	if (unlikely(c > prz->buffer_size)) {
		s += c - prz->buffer_size;
		c = prz->buffer_size;
	}

	buffer_size_add(prz, c);

	start = buffer_start_add(prz, c);

	rem = prz->buffer_size - start;
	if (unlikely(rem < c)) {
		ret = persistent_ram_update_user(prz, s, start, rem);
		s += rem;
		c -= rem;
		start = 0;
	}
	if (likely(!ret))
		ret = persistent_ram_update_user(prz, s, start, c);

	persistent_ram_update_header_ecc(prz);

	return unlikely(ret) ? ret : count;
}

size_t persistent_ram_old_size(struct persistent_ram_zone *prz)
{
	return prz->old_log_size;
}

void *persistent_ram_old(struct persistent_ram_zone *prz)
{
	return prz->old_log;
}

void persistent_ram_free_old(struct persistent_ram_zone *prz)
{
	kfree(prz->old_log);
	prz->old_log = NULL;
	prz->old_log_size = 0;
}

void persistent_ram_zap(struct persistent_ram_zone *prz)
{
	atomic_set(&prz->buffer->start, 0);
	atomic_set(&prz->buffer->size, 0);
	persistent_ram_update_header_ecc(prz);
}

static void *persistent_ram_vmap(phys_addr_t start, size_t size,
		unsigned int memtype)
{
	struct page **pages;
	phys_addr_t page_start;
	unsigned int page_count;
	pgprot_t prot;
	unsigned int i;
	void *vaddr;

	page_start = start - offset_in_page(start);
	page_count = DIV_ROUND_UP(size + offset_in_page(start), PAGE_SIZE);

	if (memtype)
		prot = pgprot_noncached(PAGE_KERNEL);
	else
		prot = pgprot_writecombine(PAGE_KERNEL);

	pages = kmalloc_array(page_count, sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		pr_err("%s: Failed to allocate array for %u pages\n",
		       __func__, page_count);
		return NULL;
	}

	for (i = 0; i < page_count; i++) {
		phys_addr_t addr = page_start + i * PAGE_SIZE;
		pages[i] = pfn_to_page(addr >> PAGE_SHIFT);
	}
	vaddr = vmap(pages, page_count, VM_MAP, prot);
	kfree(pages);

	/* Don't apply the in-page offset below to a failed (NULL) mapping. */
	if (!vaddr)
		return NULL;

	/*
	 * Since vmap() uses page granularity, we must add the offset
	 * into the page here, to get the byte granularity address
	 * into the mapping to represent the actual "start" location.
	 */
	return vaddr + offset_in_page(start);
}

static void *persistent_ram_iomap(phys_addr_t start, size_t size,
		unsigned int memtype, char *label)
{
	void *va;

	if (!request_mem_region(start, size, label ?: "ramoops")) {
		pr_err("request mem region (%s 0x%llx@0x%llx) failed\n",
			label ?: "ramoops",
			(unsigned long long)size, (unsigned long long)start);
		return NULL;
	}

	if (memtype)
		va = ioremap(start, size);
	else
		va = ioremap_wc(start, size);

	/*
	 * Since request_mem_region() and ioremap() are byte-granularity
	 * there is no need to handle anything special like we do in the
	 * vmap() case in persistent_ram_vmap() above.
	 */
	return va;
}

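/*
 * Pick a mapping strategy for the zone: physical addresses backed by
 * struct page (pfn_valid()) are mapped with vmap(), while anything else,
 * e.g. a reserved region or device memory, goes through
 * request_mem_region() plus ioremap().
 */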
static int persistent_ram_buffer_map(phys_addr_t start, phys_addr_t size,
		struct persistent_ram_zone *prz, int memtype)
{
	prz->paddr = start;
	prz->size = size;

	if (pfn_valid(start >> PAGE_SHIFT))
		prz->vaddr = persistent_ram_vmap(start, size, memtype);
	else
		prz->vaddr = persistent_ram_iomap(start, size, memtype,
						  prz->label);

	if (!prz->vaddr) {
		pr_err("%s: Failed to map 0x%llx bytes at 0x%llx\n", __func__,
			(unsigned long long)size, (unsigned long long)start);
		return -ENOMEM;
	}

	prz->buffer = prz->vaddr;
	prz->buffer_size = size - sizeof(struct persistent_ram_buffer);

	return 0;
}

static int persistent_ram_post_init(struct persistent_ram_zone *prz, u32 sig,
				    struct persistent_ram_ecc_info *ecc_info)
{
	int ret;
	bool zap = !!(prz->flags & PRZ_FLAG_ZAP_OLD);

	ret = persistent_ram_init_ecc(prz, ecc_info);
	if (ret) {
		pr_warn("ECC initialization failed for %s\n", prz->label);
		return ret;
	}

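	/*
	 * Fold the caller's per-frontend value into the signature (see the
	 * struct persistent_ram_buffer kernel-doc above) so zones written
	 * by one pstore frontend are not mistaken for another's.
	 */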
	sig ^= PERSISTENT_RAM_SIG;

	if (prz->buffer->sig == sig) {
		if (buffer_size(prz) == 0) {
			pr_debug("found existing empty buffer\n");
			return 0;
		}

		if (buffer_size(prz) > prz->buffer_size ||
		    buffer_start(prz) > buffer_size(prz)) {
			pr_info("found existing invalid buffer, size %zu, start %zu\n",
				buffer_size(prz), buffer_start(prz));
			zap = true;
		} else {
			pr_debug("found existing buffer, size %zu, start %zu\n",
				 buffer_size(prz), buffer_start(prz));
			persistent_ram_save_old(prz);
		}
	} else {
		pr_debug("no valid data in buffer (sig = 0x%08x)\n",
			 prz->buffer->sig);
		prz->buffer->sig = sig;
		zap = true;
	}

	/* Reset missing, invalid, or single-use memory area. */
	if (zap)
		persistent_ram_zap(prz);

	return 0;
}

void persistent_ram_free(struct persistent_ram_zone *prz)
{
	if (!prz)
		return;

	if (prz->vaddr) {
		if (pfn_valid(prz->paddr >> PAGE_SHIFT)) {
			/* We must vunmap() at page-granularity. */
			vunmap(prz->vaddr - offset_in_page(prz->paddr));
		} else {
			iounmap(prz->vaddr);
			release_mem_region(prz->paddr, prz->size);
		}
		prz->vaddr = NULL;
	}
	if (prz->rs_decoder) {
		free_rs(prz->rs_decoder);
		prz->rs_decoder = NULL;
	}
	kfree(prz->ecc_info.par);
	prz->ecc_info.par = NULL;

	persistent_ram_free_old(prz);
	kfree(prz->label);
	kfree(prz);
}

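/*
 * Allocate and map a persistent RAM zone at @start covering @size bytes,
 * then recover any existing contents (see persistent_ram_post_init()).
 * @label is owned by the zone afterwards and is freed by
 * persistent_ram_free(); returns an ERR_PTR() on failure.
 */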
struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
			u32 sig, struct persistent_ram_ecc_info *ecc_info,
			unsigned int memtype, u32 flags, char *label)
{
	struct persistent_ram_zone *prz;
	int ret = -ENOMEM;

	prz = kzalloc(sizeof(struct persistent_ram_zone), GFP_KERNEL);
	if (!prz) {
		pr_err("failed to allocate persistent ram zone\n");
		goto err;
	}

	/* Initialize general buffer state. */
	raw_spin_lock_init(&prz->buffer_lock);
	prz->flags = flags;
	prz->label = label;

	ret = persistent_ram_buffer_map(start, size, prz, memtype);
	if (ret)
		goto err;

	ret = persistent_ram_post_init(prz, sig, ecc_info);
	if (ret)
		goto err;

	pr_debug("attached %s 0x%zx@0x%llx: %zu header, %zu data, %zu ecc (%d/%d)\n",
		prz->label, prz->size, (unsigned long long)prz->paddr,
		sizeof(*prz->buffer), prz->buffer_size,
		prz->size - sizeof(*prz->buffer) - prz->buffer_size,
		prz->ecc_info.ecc_size, prz->ecc_info.block_size);

	return prz;
err:
	persistent_ram_free(prz);
	return ERR_PTR(ret);
}
607