// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Google, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/pstore_ram.h>
#include <linux/rslib.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <asm/page.h>

#include "ram_internal.h"

/**
 * struct persistent_ram_buffer - persistent circular RAM buffer
 *
 * @sig: Signature to indicate header (PERSISTENT_RAM_SIG xor PRZ-type value)
 * @start: First valid byte in the buffer.
 * @size: Number of valid bytes in the buffer.
 * @data: The contents of the buffer.
 */
struct persistent_ram_buffer {
	uint32_t sig;
	atomic_t start;
	atomic_t size;
	uint8_t data[];
};

#define PERSISTENT_RAM_SIG (0x43474244) /* DBGC */

static inline size_t buffer_size(struct persistent_ram_zone *prz)
{
	return atomic_read(&prz->buffer->size);
}

static inline size_t buffer_start(struct persistent_ram_zone *prz)
{
	return atomic_read(&prz->buffer->start);
}

/* increase and wrap the start pointer, returning the old value */
static size_t buffer_start_add(struct persistent_ram_zone *prz, size_t a)
{
	int old;
	int new;
	unsigned long flags = 0;

	if (!(prz->flags & PRZ_FLAG_NO_LOCK))
		raw_spin_lock_irqsave(&prz->buffer_lock, flags);

	old = atomic_read(&prz->buffer->start);
	new = old + a;
	while (unlikely(new >= prz->buffer_size))
		new -= prz->buffer_size;
	atomic_set(&prz->buffer->start, new);

	if (!(prz->flags & PRZ_FLAG_NO_LOCK))
		raw_spin_unlock_irqrestore(&prz->buffer_lock, flags);

	return old;
}

/* increase the size counter until it hits the max size */
static void buffer_size_add(struct persistent_ram_zone *prz, size_t a)
{
	size_t old;
	size_t new;
	unsigned long flags = 0;

	if (!(prz->flags & PRZ_FLAG_NO_LOCK))
		raw_spin_lock_irqsave(&prz->buffer_lock, flags);

	old = atomic_read(&prz->buffer->size);
	if (old == prz->buffer_size)
		goto exit;

	new = old + a;
	if (new > prz->buffer_size)
		new = prz->buffer_size;
	atomic_set(&prz->buffer->size, new);

exit:
	if (!(prz->flags & PRZ_FLAG_NO_LOCK))
		raw_spin_unlock_irqrestore(&prz->buffer_lock, flags);
}

static void notrace persistent_ram_encode_rs8(struct persistent_ram_zone *prz,
	uint8_t *data, size_t len, uint8_t *ecc)
{
	int i;

	/* Initialize the parity buffer */
	memset(prz->ecc_info.par, 0,
	       prz->ecc_info.ecc_size * sizeof(prz->ecc_info.par[0]));
	encode_rs8(prz->rs_decoder, data, len, prz->ecc_info.par, 0);
	for (i = 0; i < prz->ecc_info.ecc_size; i++)
		ecc[i] = prz->ecc_info.par[i];
}

static int persistent_ram_decode_rs8(struct persistent_ram_zone *prz,
	void *data, size_t len, uint8_t *ecc)
{
	int i;

	for (i = 0; i < prz->ecc_info.ecc_size; i++)
		prz->ecc_info.par[i] = ecc[i];
	return decode_rs8(prz->rs_decoder, data, prz->ecc_info.par, len,
			  NULL, 0, NULL, 0, NULL);
}
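
/*
 * A note on the ECC layout used by the helpers below (derived from
 * persistent_ram_init_ecc()): the data area is carved into
 * ecc_info.block_size chunks, each protected by ecc_info.ecc_size
 * bytes of Reed-Solomon parity stored in prz->par_buffer, with one
 * final parity run (prz->par_header) covering the header fields
 * themselves. persistent_ram_update_ecc() re-encodes only the chunks
 * touched by a write; persistent_ram_ecc_old() walks and repairs the
 * whole valid region at init time.
 */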

static void notrace persistent_ram_update_ecc(struct persistent_ram_zone *prz,
	unsigned int start, unsigned int count)
{
	struct persistent_ram_buffer *buffer = prz->buffer;
	uint8_t *buffer_end = buffer->data + prz->buffer_size;
	uint8_t *block;
	uint8_t *par;
	int ecc_block_size = prz->ecc_info.block_size;
	int ecc_size = prz->ecc_info.ecc_size;
	int size = ecc_block_size;

	if (!ecc_size)
		return;

	block = buffer->data + (start & ~(ecc_block_size - 1));
	par = prz->par_buffer + (start / ecc_block_size) * ecc_size;

	do {
		if (block + ecc_block_size > buffer_end)
			size = buffer_end - block;
		persistent_ram_encode_rs8(prz, block, size, par);
		block += ecc_block_size;
		par += ecc_size;
	} while (block < buffer->data + start + count);
}

static void persistent_ram_update_header_ecc(struct persistent_ram_zone *prz)
{
	struct persistent_ram_buffer *buffer = prz->buffer;

	if (!prz->ecc_info.ecc_size)
		return;

	persistent_ram_encode_rs8(prz, (uint8_t *)buffer, sizeof(*buffer),
				  prz->par_header);
}

static void persistent_ram_ecc_old(struct persistent_ram_zone *prz)
{
	struct persistent_ram_buffer *buffer = prz->buffer;
	uint8_t *block;
	uint8_t *par;

	if (!prz->ecc_info.ecc_size)
		return;

	block = buffer->data;
	par = prz->par_buffer;
	while (block < buffer->data + buffer_size(prz)) {
		int numerr;
		int size = prz->ecc_info.block_size;

		if (block + size > buffer->data + prz->buffer_size)
			size = buffer->data + prz->buffer_size - block;
		numerr = persistent_ram_decode_rs8(prz, block, size, par);
		if (numerr > 0) {
			pr_devel("error in block %p, %d\n", block, numerr);
			prz->corrected_bytes += numerr;
		} else if (numerr < 0) {
			pr_devel("uncorrectable error in block %p\n", block);
			prz->bad_blocks++;
		}
		block += prz->ecc_info.block_size;
		par += prz->ecc_info.ecc_size;
	}
}
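
/*
 * Sizing note for persistent_ram_init_ecc() below: the region must
 * hold data + ceil(data / block_size) * ecc_size parity bytes + one
 * ecc_size run for the header, which the DIV_ROUND_UP() there bounds
 * conservatively. A worked example with assumed (not mandated)
 * values buffer_size = 4096, block_size = 128, ecc_size = 16:
 * ecc_blocks = DIV_ROUND_UP(4080, 144) = 29, so ecc_total = 30 * 16
 * = 480, leaving 3616 data bytes, which need exactly
 * DIV_ROUND_UP(3616, 128) = 29 parity runs plus the header run.
 */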

static int persistent_ram_init_ecc(struct persistent_ram_zone *prz,
				   struct persistent_ram_ecc_info *ecc_info)
{
	int numerr;
	struct persistent_ram_buffer *buffer = prz->buffer;
	size_t ecc_blocks;
	size_t ecc_total;

	if (!ecc_info || !ecc_info->ecc_size)
		return 0;

	prz->ecc_info.block_size = ecc_info->block_size ?: 128;
	prz->ecc_info.ecc_size = ecc_info->ecc_size ?: 16;
	prz->ecc_info.symsize = ecc_info->symsize ?: 8;
	prz->ecc_info.poly = ecc_info->poly ?: 0x11d;

	ecc_blocks = DIV_ROUND_UP(prz->buffer_size - prz->ecc_info.ecc_size,
				  prz->ecc_info.block_size +
				  prz->ecc_info.ecc_size);
	ecc_total = (ecc_blocks + 1) * prz->ecc_info.ecc_size;
	if (ecc_total >= prz->buffer_size) {
		pr_err("%s: invalid ecc_size %u (total %zu, buffer size %zu)\n",
		       __func__, prz->ecc_info.ecc_size,
		       ecc_total, prz->buffer_size);
		return -EINVAL;
	}

	prz->buffer_size -= ecc_total;
	prz->par_buffer = buffer->data + prz->buffer_size;
	prz->par_header = prz->par_buffer +
			  ecc_blocks * prz->ecc_info.ecc_size;

	/*
	 * first consecutive root is 0
	 * primitive element to generate roots = 1
	 */
	prz->rs_decoder = init_rs(prz->ecc_info.symsize, prz->ecc_info.poly,
				  0, 1, prz->ecc_info.ecc_size);
	if (prz->rs_decoder == NULL) {
		pr_info("init_rs failed\n");
		return -EINVAL;
	}

	/* allocate workspace instead of using stack VLA */
	prz->ecc_info.par = kmalloc_array(prz->ecc_info.ecc_size,
					  sizeof(*prz->ecc_info.par),
					  GFP_KERNEL);
	if (!prz->ecc_info.par) {
		pr_err("cannot allocate ECC parity workspace\n");
		return -ENOMEM;
	}

	prz->corrected_bytes = 0;
	prz->bad_blocks = 0;

	numerr = persistent_ram_decode_rs8(prz, buffer, sizeof(*buffer),
					   prz->par_header);
	if (numerr > 0) {
		pr_info("error in header, %d\n", numerr);
		prz->corrected_bytes += numerr;
	} else if (numerr < 0) {
		pr_info_ratelimited("uncorrectable error in header\n");
		prz->bad_blocks++;
	}

	return 0;
}

ssize_t persistent_ram_ecc_string(struct persistent_ram_zone *prz,
	char *str, size_t len)
{
	ssize_t ret;

	if (!prz->ecc_info.ecc_size)
		return 0;

	if (prz->corrected_bytes || prz->bad_blocks)
		ret = snprintf(str, len,
			"\nECC: %d Corrected bytes, %d unrecoverable blocks\n",
			prz->corrected_bytes, prz->bad_blocks);
	else
		ret = snprintf(str, len, "\nECC: No errors detected\n");

	return ret;
}

static void notrace persistent_ram_update(struct persistent_ram_zone *prz,
	const void *s, unsigned int start, unsigned int count)
{
	struct persistent_ram_buffer *buffer = prz->buffer;

	memcpy_toio(buffer->data + start, s, count);
	persistent_ram_update_ecc(prz, start, count);
}

static int notrace persistent_ram_update_user(struct persistent_ram_zone *prz,
	const void __user *s, unsigned int start, unsigned int count)
{
	struct persistent_ram_buffer *buffer = prz->buffer;
	int ret = unlikely(copy_from_user(buffer->data + start, s, count)) ?
		-EFAULT : 0;

	persistent_ram_update_ecc(prz, start, count);
	return ret;
}

void persistent_ram_save_old(struct persistent_ram_zone *prz)
{
	struct persistent_ram_buffer *buffer = prz->buffer;
	size_t size = buffer_size(prz);
	size_t start = buffer_start(prz);

	if (!size)
		return;

	/*
	 * If the existing buffer is differently sized, free it so a new
	 * one is allocated. This can happen when persistent_ram_save_old()
	 * runs early in boot and then again for a timer-triggered
	 * survivable crash, if the two crash dumps differ in size
	 * (unlikely in practice, since kmsg buffers usually exceed prz
	 * buffer sizes).
	 */
	if (prz->old_log && prz->old_log_size != size)
		persistent_ram_free_old(prz);

	if (!prz->old_log) {
		persistent_ram_ecc_old(prz);
		prz->old_log = kvzalloc(size, GFP_KERNEL);
	}
	if (!prz->old_log) {
		pr_err("failed to allocate buffer\n");
		return;
	}

	prz->old_log_size = size;
	/* Unwrap the ring: bytes at start..end first, then 0..start. */
	memcpy_fromio(prz->old_log, &buffer->data[start], size - start);
	memcpy_fromio(prz->old_log + size - start, &buffer->data[0], start);
}

/*
 * Worked example of the wrap-around path below (sizes assumed for
 * illustration): with buffer_size = 100, start = 80 and a 30-byte
 * write, buffer_start_add() returns 80 and advances start to 10;
 * rem = 20 < c = 30, so the first 20 bytes land at offset 80 and the
 * remaining 10 at offset 0, while the size counter saturates at
 * buffer_size.
 */
int notrace persistent_ram_write(struct persistent_ram_zone *prz,
	const void *s, unsigned int count)
{
	int rem;
	int c = count;
	size_t start;

	if (unlikely(c > prz->buffer_size)) {
		s += c - prz->buffer_size;
		c = prz->buffer_size;
	}

	buffer_size_add(prz, c);

	start = buffer_start_add(prz, c);

	rem = prz->buffer_size - start;
	if (unlikely(rem < c)) {
		persistent_ram_update(prz, s, start, rem);
		s += rem;
		c -= rem;
		start = 0;
	}
	persistent_ram_update(prz, s, start, c);

	persistent_ram_update_header_ecc(prz);

	return count;
}

int notrace persistent_ram_write_user(struct persistent_ram_zone *prz,
	const void __user *s, unsigned int count)
{
	int rem, ret = 0, c = count;
	size_t start;

	if (unlikely(c > prz->buffer_size)) {
		s += c - prz->buffer_size;
		c = prz->buffer_size;
	}

	buffer_size_add(prz, c);

	start = buffer_start_add(prz, c);

	rem = prz->buffer_size - start;
	if (unlikely(rem < c)) {
		ret = persistent_ram_update_user(prz, s, start, rem);
		s += rem;
		c -= rem;
		start = 0;
	}
	if (likely(!ret))
		ret = persistent_ram_update_user(prz, s, start, c);

	persistent_ram_update_header_ecc(prz);

	return unlikely(ret) ? ret : count;
}

size_t persistent_ram_old_size(struct persistent_ram_zone *prz)
{
	return prz->old_log_size;
}

void *persistent_ram_old(struct persistent_ram_zone *prz)
{
	return prz->old_log;
}

void persistent_ram_free_old(struct persistent_ram_zone *prz)
{
	kvfree(prz->old_log);
	prz->old_log = NULL;
	prz->old_log_size = 0;
}

void persistent_ram_zap(struct persistent_ram_zone *prz)
{
	atomic_set(&prz->buffer->start, 0);
	atomic_set(&prz->buffer->size, 0);
	persistent_ram_update_header_ecc(prz);
}

#define MEM_TYPE_WCOMBINE	0
#define MEM_TYPE_NONCACHED	1
#define MEM_TYPE_NORMAL		2

static void *persistent_ram_vmap(phys_addr_t start, size_t size,
		unsigned int memtype)
{
	struct page **pages;
	phys_addr_t page_start;
	unsigned int page_count;
	pgprot_t prot;
	unsigned int i;
	void *vaddr;

	page_start = start - offset_in_page(start);
	page_count = DIV_ROUND_UP(size + offset_in_page(start), PAGE_SIZE);

	switch (memtype) {
	case MEM_TYPE_NORMAL:
		prot = PAGE_KERNEL;
		break;
	case MEM_TYPE_NONCACHED:
		prot = pgprot_noncached(PAGE_KERNEL);
		break;
	case MEM_TYPE_WCOMBINE:
		prot = pgprot_writecombine(PAGE_KERNEL);
		break;
	default:
		pr_err("invalid mem_type=%d\n", memtype);
		return NULL;
	}

	pages = kmalloc_array(page_count, sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		pr_err("%s: Failed to allocate array for %u pages\n",
		       __func__, page_count);
		return NULL;
	}

	for (i = 0; i < page_count; i++) {
		phys_addr_t addr = page_start + i * PAGE_SIZE;
		pages[i] = pfn_to_page(addr >> PAGE_SHIFT);
	}
	/*
	 * VM_IOREMAP is used here to bypass this region during vread()
	 * and kmap_atomic() (i.e. kcore) to avoid __va() failures.
	 */
	vaddr = vmap(pages, page_count, VM_MAP | VM_IOREMAP, prot);
	kfree(pages);

	/*
	 * vmap() may fail and return NULL. Do not add the offset in this
	 * case, otherwise a NULL mapping would appear successful.
	 */
	if (!vaddr)
		return NULL;

	/*
	 * Since vmap() uses page granularity, we must add the offset
	 * into the page here, to get the byte-granularity address
	 * into the mapping that represents the actual "start" location.
	 */
	return vaddr + offset_in_page(start);
}

static void *persistent_ram_iomap(phys_addr_t start, size_t size,
		unsigned int memtype, char *label)
{
	void *va;

	if (!request_mem_region(start, size, label ?: "ramoops")) {
		pr_err("request mem region (%s 0x%llx@0x%llx) failed\n",
		       label ?: "ramoops",
		       (unsigned long long)size, (unsigned long long)start);
		return NULL;
	}

	if (memtype)
		va = ioremap(start, size);
	else
		va = ioremap_wc(start, size);

	/*
	 * Since request_mem_region() and ioremap() are byte-granularity
	 * there is no need to handle anything special like we do in the
	 * vmap() case in persistent_ram_vmap() above.
	 */
	return va;
}

/*
 * Map through vmap() when the region is backed by struct pages
 * (pfn_valid()), otherwise reserve and ioremap() it as device memory.
 */
static int persistent_ram_buffer_map(phys_addr_t start, phys_addr_t size,
		struct persistent_ram_zone *prz, int memtype)
{
	prz->paddr = start;
	prz->size = size;

	if (pfn_valid(start >> PAGE_SHIFT))
		prz->vaddr = persistent_ram_vmap(start, size, memtype);
	else
		prz->vaddr = persistent_ram_iomap(start, size, memtype,
						  prz->label);

	if (!prz->vaddr) {
		pr_err("%s: Failed to map 0x%llx bytes at 0x%llx\n", __func__,
		       (unsigned long long)size, (unsigned long long)start);
		return -ENOMEM;
	}

	prz->buffer = prz->vaddr;
	prz->buffer_size = size - sizeof(struct persistent_ram_buffer);

	return 0;
}

static int persistent_ram_post_init(struct persistent_ram_zone *prz, u32 sig,
				    struct persistent_ram_ecc_info *ecc_info)
{
	int ret;
	bool zap = !!(prz->flags & PRZ_FLAG_ZAP_OLD);

	ret = persistent_ram_init_ecc(prz, ecc_info);
	if (ret) {
		pr_warn("ECC failed for %s\n", prz->label);
		return ret;
	}

	sig ^= PERSISTENT_RAM_SIG;

	if (prz->buffer->sig == sig) {
		if (buffer_size(prz) == 0 && buffer_start(prz) == 0) {
			pr_debug("found existing empty buffer\n");
			return 0;
		}

		if (buffer_size(prz) > prz->buffer_size ||
		    buffer_start(prz) > buffer_size(prz)) {
			pr_info("found existing invalid buffer, size %zu, start %zu\n",
				buffer_size(prz), buffer_start(prz));
			zap = true;
		} else {
			pr_debug("found existing buffer, size %zu, start %zu\n",
				 buffer_size(prz), buffer_start(prz));
			persistent_ram_save_old(prz);
		}
	} else {
		pr_debug("no valid data in buffer (sig = 0x%08x)\n",
			 prz->buffer->sig);
		prz->buffer->sig = sig;
		zap = true;
	}

	/* Reset missing, invalid, or single-use memory area. */
	if (zap)
		persistent_ram_zap(prz);

	return 0;
}

void persistent_ram_free(struct persistent_ram_zone **_prz)
{
	struct persistent_ram_zone *prz;

	if (!_prz)
		return;

	prz = *_prz;
	if (!prz)
		return;

	if (prz->vaddr) {
		if (pfn_valid(prz->paddr >> PAGE_SHIFT)) {
			/* We must vunmap() at page granularity. */
			vunmap(prz->vaddr - offset_in_page(prz->paddr));
		} else {
			iounmap(prz->vaddr);
			release_mem_region(prz->paddr, prz->size);
		}
		prz->vaddr = NULL;
	}
	if (prz->rs_decoder) {
		free_rs(prz->rs_decoder);
		prz->rs_decoder = NULL;
	}
	kfree(prz->ecc_info.par);
	prz->ecc_info.par = NULL;

	persistent_ram_free_old(prz);
	kfree(prz->label);
	kfree(prz);
	*_prz = NULL;
}

struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
			u32 sig, struct persistent_ram_ecc_info *ecc_info,
			unsigned int memtype, u32 flags, char *label)
{
	struct persistent_ram_zone *prz;
	int ret = -ENOMEM;

	prz = kzalloc(sizeof(struct persistent_ram_zone), GFP_KERNEL);
	if (!prz) {
		pr_err("failed to allocate persistent ram zone\n");
		goto err;
	}

	/* Initialize general buffer state. */
	raw_spin_lock_init(&prz->buffer_lock);
	prz->flags = flags;
	prz->label = kstrdup(label, GFP_KERNEL);
	if (!prz->label)
		goto err;

	ret = persistent_ram_buffer_map(start, size, prz, memtype);
	if (ret)
		goto err;

	ret = persistent_ram_post_init(prz, sig, ecc_info);
	if (ret)
		goto err;

	pr_debug("attached %s 0x%zx@0x%llx: %zu header, %zu data, %zu ecc (%d/%d)\n",
		 prz->label, prz->size, (unsigned long long)prz->paddr,
		 sizeof(*prz->buffer), prz->buffer_size,
		 prz->size - sizeof(*prz->buffer) - prz->buffer_size,
		 prz->ecc_info.ecc_size, prz->ecc_info.block_size);

	return prz;
err:
	persistent_ram_free(&prz);
	return ERR_PTR(ret);
}
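
/*
 * Sketch of a typical caller of persistent_ram_new() above (ramoops
 * is the in-tree user; recover(), paddr, sig and the other values are
 * placeholders for illustration, not real symbols):
 *
 *	prz = persistent_ram_new(paddr, size, sig, &ecc_info,
 *				 memtype, 0, "dmesg");
 *	if (IS_ERR(prz))
 *		return PTR_ERR(prz);
 *	if (persistent_ram_old_size(prz))
 *		recover(persistent_ram_old(prz),
 *			persistent_ram_old_size(prz));
 *	persistent_ram_free_old(prz);
 *	persistent_ram_write(prz, buf, len);
 */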