// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/dirent.h>
#include <linux/syscalls.h>
#include <linux/utime.h>
#include <linux/file.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/namei.h>
#include <linux/init_syscalls.h>

static ssize_t __init xwrite(struct file *file, const char *p, size_t count,
		loff_t *pos)
{
	ssize_t out = 0;

	/* sys_write can only write MAX_RW_COUNT aka 2G-4K bytes at most */
	while (count) {
		ssize_t rv = kernel_write(file, p, count, pos);

		if (rv < 0) {
			if (rv == -EINTR || rv == -EAGAIN)
				continue;
			return out ? out : rv;
		} else if (rv == 0)
			break;

		p += rv;
		out += rv;
		count -= rv;
	}

	return out;
}

static __initdata char *message;
static void __init error(char *x)
{
	if (!message)
		message = x;
}

/* There is no vpanic(), so use a macro to forward the format arguments. */
#define panic_show_mem(fmt, ...) \
	({ show_mem(0, NULL); panic(fmt, ##__VA_ARGS__); })

/* link hash */

#define N_ALIGN(len) ((((len) + 1) & ~3) + 2)

static __initdata struct hash {
	int ino, minor, major;
	umode_t mode;
	struct hash *next;
	char name[N_ALIGN(PATH_MAX)];
} *head[32];

static inline int hash(int major, int minor, int ino)
{
	unsigned long tmp = ino + minor + (major << 3);
	tmp += tmp >> 5;
	return tmp & 31;
}

static char __init *find_link(int major, int minor, int ino,
			      umode_t mode, char *name)
{
	struct hash **p, *q;
	for (p = head + hash(major, minor, ino); *p; p = &(*p)->next) {
		if ((*p)->ino != ino)
			continue;
		if ((*p)->minor != minor)
			continue;
		if ((*p)->major != major)
			continue;
		if (((*p)->mode ^ mode) & S_IFMT)
			continue;
		return (*p)->name;
	}
	q = kmalloc(sizeof(struct hash), GFP_KERNEL);
	if (!q)
		panic_show_mem("can't allocate link hash entry");
	q->major = major;
	q->minor = minor;
	q->ino = ino;
	q->mode = mode;
	strcpy(q->name, name);
	q->next = NULL;
	*p = q;
	return NULL;
}

static void __init free_hash(void)
{
	struct hash **p, *q;
	for (p = head; p < head + 32; p++) {
		while (*p) {
			q = *p;
			*p = q->next;
			kfree(q);
		}
	}
}

static long __init do_utime(char *filename, time64_t mtime)
{
	struct timespec64 t[2];

	t[0].tv_sec = mtime;
	t[0].tv_nsec = 0;
	t[1].tv_sec = mtime;
	t[1].tv_nsec = 0;
	return init_utimes(filename, t);
}

static __initdata LIST_HEAD(dir_list);
struct dir_entry {
	struct list_head list;
	char *name;
	time64_t mtime;
};

static void __init dir_add(const char *name, time64_t mtime)
{
	struct dir_entry *de = kmalloc(sizeof(struct dir_entry), GFP_KERNEL);
	if (!de)
		panic_show_mem("can't allocate dir_entry buffer");
	INIT_LIST_HEAD(&de->list);
	de->name = kstrdup(name, GFP_KERNEL);
	de->mtime = mtime;
	list_add(&de->list, &dir_list);
}

static void __init dir_utime(void)
{
	struct dir_entry *de, *tmp;
	list_for_each_entry_safe(de, tmp, &dir_list, list) {
		list_del(&de->list);
		do_utime(de->name, de->mtime);
		kfree(de->name);
		kfree(de);
	}
}

static __initdata time64_t mtime;

/* cpio header parsing */

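/*
 * A "newc" (cpio -H newc) record starts with a 110-byte ASCII header: the
 * 6-character magic "070701" followed by thirteen 8-digit hex fields
 * (c_ino, c_mode, c_uid, c_gid, c_nlink, c_mtime, c_filesize, c_devmajor,
 * c_devminor, c_rdevmajor, c_rdevminor, c_namesize, c_check).  parse_header()
 * skips the magic, converts the first twelve fields into the variables below
 * and ignores the trailing checksum.  The NUL-terminated name follows the
 * header and is padded via N_ALIGN() so that the file data after it starts
 * on a 4-byte boundary.
 */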
static __initdata unsigned long ino, major, minor, nlink;
static __initdata umode_t mode;
static __initdata unsigned long body_len, name_len;
static __initdata uid_t uid;
static __initdata gid_t gid;
static __initdata unsigned rdev;

static void __init parse_header(char *s)
{
	unsigned long parsed[12];
	char buf[9];
	int i;

	buf[8] = '\0';
	for (i = 0, s += 6; i < 12; i++, s += 8) {
		memcpy(buf, s, 8);
		parsed[i] = simple_strtoul(buf, NULL, 16);
	}
	ino = parsed[0];
	mode = parsed[1];
	uid = parsed[2];
	gid = parsed[3];
	nlink = parsed[4];
	mtime = parsed[5]; /* breaks in y2106 */
	body_len = parsed[6];
	major = parsed[7];
	minor = parsed[8];
	rdev = new_encode_dev(MKDEV(parsed[9], parsed[10]));
	name_len = parsed[11];
}

/* FSM */

static __initdata enum state {
	Start,
	Collect,
	GotHeader,
	SkipIt,
	GotName,
	CopyFile,
	GotSymlink,
	Reset
} state, next_state;

static __initdata char *victim;
static unsigned long byte_count __initdata;
static __initdata loff_t this_header, next_header;

static inline void __init eat(unsigned n)
{
	victim += n;
	this_header += n;
	byte_count -= n;
}

static __initdata char *collected;
static long remains __initdata;
static __initdata char *collect;

static void __init read_into(char *buf, unsigned size, enum state next)
{
	if (byte_count >= size) {
		collected = victim;
		eat(size);
		state = next;
	} else {
		collect = collected = buf;
		remains = size;
		next_state = next;
		state = Collect;
	}
}

static __initdata char *header_buf, *symlink_buf, *name_buf;

static int __init do_start(void)
{
	read_into(header_buf, 110, GotHeader);
	return 0;
}

static int __init do_collect(void)
{
	unsigned long n = remains;
	if (byte_count < n)
		n = byte_count;
	memcpy(collect, victim, n);
	eat(n);
	collect += n;
	if ((remains -= n) != 0)
		return 1;
	state = next_state;
	return 0;
}

static int __init do_header(void)
{
	if (memcmp(collected, "070707", 6) == 0) {
		error("incorrect cpio method used: use -H newc option");
		return 1;
	}
	if (memcmp(collected, "070701", 6)) {
		error("no cpio magic");
		return 1;
	}
	parse_header(collected);
	next_header = this_header + N_ALIGN(name_len) + body_len;
	next_header = (next_header + 3) & ~3;
	state = SkipIt;
	if (name_len <= 0 || name_len > PATH_MAX)
		return 0;
	if (S_ISLNK(mode)) {
		if (body_len > PATH_MAX)
			return 0;
		collect = collected = symlink_buf;
		remains = N_ALIGN(name_len) + body_len;
		next_state = GotSymlink;
		state = Collect;
		return 0;
	}
	if (S_ISREG(mode) || !body_len)
		read_into(name_buf, N_ALIGN(name_len), GotName);
	return 0;
}

static int __init do_skip(void)
{
	if (this_header + byte_count < next_header) {
		eat(byte_count);
		return 1;
	} else {
		eat(next_header - this_header);
		state = next_state;
		return 0;
	}
}

static int __init do_reset(void)
{
	while (byte_count && *victim == '\0')
		eat(1);
	if (byte_count && (this_header & 3))
		error("broken padding");
	return 1;
}

static void __init clean_path(char *path, umode_t fmode)
{
	struct kstat st;

	if (!init_stat(path, &st, AT_SYMLINK_NOFOLLOW) &&
	    (st.mode ^ fmode) & S_IFMT) {
		if (S_ISDIR(st.mode))
			init_rmdir(path);
		else
			init_unlink(path);
	}
}

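/*
 * Hard links: cpio stores every name of a multiply-linked inode as its own
 * record with c_nlink >= 2.  The first (major, minor, ino, file type) seen
 * is remembered in the link hash and extracted normally; any later record
 * that matches is recreated with init_link() against that first name.  The
 * hash is emptied again once the "TRAILER!!!" record ends the archive.
 */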
static int __init maybe_link(void)
{
	if (nlink >= 2) {
		char *old = find_link(major, minor, ino, mode, collected);
		if (old) {
			clean_path(collected, 0);
			return (init_link(old, collected) < 0) ? -1 : 1;
		}
	}
	return 0;
}

static __initdata struct file *wfile;
static __initdata loff_t wfile_pos;

static int __init do_name(void)
{
	state = SkipIt;
	next_state = Reset;
	if (strcmp(collected, "TRAILER!!!") == 0) {
		free_hash();
		return 0;
	}
	clean_path(collected, mode);
	if (S_ISREG(mode)) {
		int ml = maybe_link();
		if (ml >= 0) {
			int openflags = O_WRONLY|O_CREAT;
			if (ml != 1)
				openflags |= O_TRUNC;
			wfile = filp_open(collected, openflags, mode);
			if (IS_ERR(wfile))
				return 0;
			wfile_pos = 0;

			vfs_fchown(wfile, uid, gid);
			vfs_fchmod(wfile, mode);
			if (body_len)
				vfs_truncate(&wfile->f_path, body_len);
			state = CopyFile;
		}
	} else if (S_ISDIR(mode)) {
		init_mkdir(collected, mode);
		init_chown(collected, uid, gid, 0);
		init_chmod(collected, mode);
		dir_add(collected, mtime);
	} else if (S_ISBLK(mode) || S_ISCHR(mode) ||
		   S_ISFIFO(mode) || S_ISSOCK(mode)) {
		if (maybe_link() == 0) {
			init_mknod(collected, mode, rdev);
			init_chown(collected, uid, gid, 0);
			init_chmod(collected, mode);
			do_utime(collected, mtime);
		}
	}
	return 0;
}

static int __init do_copy(void)
{
	if (byte_count >= body_len) {
		struct timespec64 t[2] = { };
		if (xwrite(wfile, victim, body_len, &wfile_pos) != body_len)
			error("write error");

		t[0].tv_sec = mtime;
		t[1].tv_sec = mtime;
		vfs_utimes(&wfile->f_path, t);

		fput(wfile);
		eat(body_len);
		state = SkipIt;
		return 0;
	} else {
		if (xwrite(wfile, victim, byte_count, &wfile_pos) != byte_count)
			error("write error");
		body_len -= byte_count;
		eat(byte_count);
		return 1;
	}
}

static int __init do_symlink(void)
{
	collected[N_ALIGN(name_len) + body_len] = '\0';
	clean_path(collected, 0);
	init_symlink(collected + N_ALIGN(name_len), collected);
	init_chown(collected, uid, gid, AT_SYMLINK_NOFOLLOW);
	do_utime(collected, mtime);
	state = SkipIt;
	next_state = Reset;
	return 0;
}

static __initdata int (*actions[])(void) = {
	[Start]		= do_start,
	[Collect]	= do_collect,
	[GotHeader]	= do_header,
	[SkipIt]	= do_skip,
	[GotName]	= do_name,
	[CopyFile]	= do_copy,
	[GotSymlink]	= do_symlink,
	[Reset]		= do_reset,
};

static long __init write_buffer(char *buf, unsigned long len)
{
	byte_count = len;
	victim = buf;

	while (!actions[state]())
		;
	return len - byte_count;
}

static long __init flush_buffer(void *bufv, unsigned long len)
{
	char *buf = (char *) bufv;
	long written;
	long origLen = len;
	if (message)
		return -1;
	while ((written = write_buffer(buf, len)) < len && !message) {
		char c = buf[written];
		if (c == '0') {
			buf += written;
			len -= written;
			state = Start;
		} else if (c == 0) {
			buf += written;
			len -= written;
			state = Reset;
		} else
			error("junk within compressed archive");
	}
	return origLen;
}

static unsigned long my_inptr; /* index of next byte to be processed in inbuf */

#include <linux/decompress/generic.h>

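/*
 * The initramfs image may be several cpio archives concatenated back to
 * back, each either uncompressed or compressed, separated by NUL padding.
 * unpack_to_rootfs() walks the buffer: a '0' (start of the "070701" magic)
 * at a 4-byte aligned offset marks an uncompressed archive and is fed
 * straight to write_buffer(); padding bytes are skipped; anything else is
 * probed with decompress_method() and the decompressor's output is handed
 * back through flush_buffer() above.
 */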
static char * __init unpack_to_rootfs(char *buf, unsigned long len)
{
	long written;
	decompress_fn decompress;
	const char *compress_name;
	static __initdata char msg_buf[64];

	header_buf = kmalloc(110, GFP_KERNEL);
	symlink_buf = kmalloc(PATH_MAX + N_ALIGN(PATH_MAX) + 1, GFP_KERNEL);
	name_buf = kmalloc(N_ALIGN(PATH_MAX), GFP_KERNEL);

	if (!header_buf || !symlink_buf || !name_buf)
		panic_show_mem("can't allocate buffers");

	state = Start;
	this_header = 0;
	message = NULL;
	while (!message && len) {
		loff_t saved_offset = this_header;
		if (*buf == '0' && !(this_header & 3)) {
			state = Start;
			written = write_buffer(buf, len);
			buf += written;
			len -= written;
			continue;
		}
		if (!*buf) {
			buf++;
			len--;
			this_header++;
			continue;
		}
		this_header = 0;
		decompress = decompress_method(buf, len, &compress_name);
		pr_debug("Detected %s compressed data\n", compress_name);
		if (decompress) {
			int res = decompress(buf, len, NULL, flush_buffer, NULL,
					     &my_inptr, error);
			if (res)
				error("decompressor failed");
		} else if (compress_name) {
			if (!message) {
				snprintf(msg_buf, sizeof msg_buf,
					 "compression method %s not configured",
					 compress_name);
				message = msg_buf;
			}
		} else
			error("invalid magic at start of compressed archive");
		if (state != Reset)
			error("junk at the end of compressed archive");
		this_header = saved_offset + my_inptr;
		buf += my_inptr;
		len -= my_inptr;
	}
	dir_utime();
	kfree(name_buf);
	kfree(symlink_buf);
	kfree(header_buf);
	return message;
}

static int __initdata do_retain_initrd;

static int __init retain_initrd_param(char *str)
{
	if (*str)
		return 0;
	do_retain_initrd = 1;
	return 1;
}
__setup("retain_initrd", retain_initrd_param);

#ifdef CONFIG_ARCH_HAS_KEEPINITRD
static int __init keepinitrd_setup(char *__unused)
{
	do_retain_initrd = 1;
	return 1;
}
__setup("keepinitrd", keepinitrd_setup);
#endif

extern char __initramfs_start[];
extern unsigned long __initramfs_size;
#include <linux/initrd.h>
#include <linux/kexec.h>

void __init reserve_initrd_mem(void)
{
	phys_addr_t start;
	unsigned long size;

	/* Ignore the virtual address computed during device tree parsing */
	initrd_start = initrd_end = 0;

	if (!phys_initrd_size)
		return;
	/*
	 * Round the memory region to page boundaries as per free_initrd_mem().
	 * This allows us to detect whether the pages overlapping the initrd
	 * are in use, but more importantly, reserves the entire set of pages
	 * as we don't want these pages allocated for other purposes.
	 */
	start = round_down(phys_initrd_start, PAGE_SIZE);
	size = phys_initrd_size + (phys_initrd_start - start);
	size = round_up(size, PAGE_SIZE);

	if (!memblock_is_region_memory(start, size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region",
		       (u64)start, size);
		goto disable;
	}

	if (memblock_is_region_reserved(start, size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region",
		       (u64)start, size);
		goto disable;
	}

	memblock_reserve(start, size);
	/* Now convert initrd to virtual addresses */
	initrd_start = (unsigned long)__va(phys_initrd_start);
	initrd_end = initrd_start + phys_initrd_size;
	initrd_below_start_ok = 1;

	return;
disable:
	pr_cont(" - disabling initrd\n");
	initrd_start = 0;
	initrd_end = 0;
}

void __weak __init free_initrd_mem(unsigned long start, unsigned long end)
{
#ifdef CONFIG_ARCH_KEEP_MEMBLOCK
	unsigned long aligned_start = ALIGN_DOWN(start, PAGE_SIZE);
	unsigned long aligned_end = ALIGN(end, PAGE_SIZE);

	memblock_free(__pa(aligned_start), aligned_end - aligned_start);
#endif

	free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
			"initrd");
}

#ifdef CONFIG_KEXEC_CORE
static bool __init kexec_free_initrd(void)
{
	unsigned long crashk_start = (unsigned long)__va(crashk_res.start);
	unsigned long crashk_end = (unsigned long)__va(crashk_res.end);

	/*
	 * If the initrd region overlaps the crashkernel reserved region,
	 * free only the memory that is not part of the crashkernel region.
	 */
	if (initrd_start >= crashk_end || initrd_end <= crashk_start)
		return false;

	/*
	 * Initialize the initrd memory region since kexec boot does not do so.
	 */
	memset((void *)initrd_start, 0, initrd_end - initrd_start);
	if (initrd_start < crashk_start)
		free_initrd_mem(initrd_start, crashk_start);
	if (initrd_end > crashk_end)
		free_initrd_mem(crashk_end, initrd_end);
	return true;
}
#else
static inline bool kexec_free_initrd(void)
{
	return false;
}
#endif /* CONFIG_KEXEC_CORE */

#ifdef CONFIG_BLK_DEV_RAM
static void __init populate_initrd_image(char *err)
{
	ssize_t written;
	struct file *file;
	loff_t pos = 0;

	unpack_to_rootfs(__initramfs_start, __initramfs_size);

	printk(KERN_INFO "rootfs image is not initramfs (%s); looks like an initrd\n",
			err);
	file = filp_open("/initrd.image", O_WRONLY | O_CREAT, 0700);
	if (IS_ERR(file))
		return;

	written = xwrite(file, (char *)initrd_start, initrd_end - initrd_start,
			&pos);
	if (written != initrd_end - initrd_start)
		pr_err("/initrd.image: incomplete write (%zd != %ld)\n",
		       written, initrd_end - initrd_start);
	fput(file);
}
#endif /* CONFIG_BLK_DEV_RAM */

static int __init populate_rootfs(void)
{
	/* Load the built in initramfs */
	char *err = unpack_to_rootfs(__initramfs_start, __initramfs_size);
	if (err)
		panic_show_mem("%s", err); /* Failed to decompress INTERNAL initramfs */

	if (!initrd_start || IS_ENABLED(CONFIG_INITRAMFS_FORCE))
		goto done;

	if (IS_ENABLED(CONFIG_BLK_DEV_RAM))
		printk(KERN_INFO "Trying to unpack rootfs image as initramfs...\n");
	else
		printk(KERN_INFO "Unpacking initramfs...\n");

	err = unpack_to_rootfs((char *)initrd_start, initrd_end - initrd_start);
	if (err) {
#ifdef CONFIG_BLK_DEV_RAM
		populate_initrd_image(err);
#else
		printk(KERN_EMERG "Initramfs unpacking failed: %s\n", err);
#endif
	}

done:
	/*
	 * If the initrd region overlaps the crashkernel reserved region,
	 * free only the memory that is not part of the crashkernel region.
	 */
	if (!do_retain_initrd && initrd_start && !kexec_free_initrd())
		free_initrd_mem(initrd_start, initrd_end);
	initrd_start = 0;
	initrd_end = 0;

	flush_delayed_fput();
	return 0;
}
rootfs_initcall(populate_rootfs);