// SPDX-License-Identifier: GPL-2.0-only
/*
 * kexec.c - kexec system call core code.
 * Copyright (C) 2002-2004 Eric Biederman <ebiederm@xmission.com>
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/btf.h>
#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/kexec.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/liveupdate.h>
#include <linux/highmem.h>
#include <linux/syscalls.h>
#include <linux/reboot.h>
#include <linux/ioport.h>
#include <linux/hardirq.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/utsname.h>
#include <linux/numa.h>
#include <linux/suspend.h>
#include <linux/device.h>
#include <linux/freezer.h>
#include <linux/panic_notifier.h>
#include <linux/pm.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/console.h>
#include <linux/vmalloc.h>
#include <linux/swap.h>
#include <linux/syscore_ops.h>
#include <linux/compiler.h>
#include <linux/hugetlb.h>
#include <linux/objtool.h>
#include <linux/kmsg_dump.h>
#include <linux/dma-map-ops.h>

#include <asm/page.h>
#include <asm/sections.h>

#include <crypto/hash.h>
#include "kexec_internal.h"

atomic_t __kexec_lock = ATOMIC_INIT(0);

/* Flag to indicate we are going to kexec a new kernel */
bool kexec_in_progress = false;

bool kexec_file_dbg_print;

/*
 * When kexec transitions to the new kernel there is a one-to-one
 * mapping between physical and virtual addresses.  On processors
 * where you can disable the MMU this is trivial, and easy.  For
 * others it is still a simple, predictable page table to set up.
 *
 * In that environment kexec copies the new kernel to its final
 * resting place.  This means I can only support memory whose
 * physical address can fit in an unsigned long.  In particular
 * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled.
 * If the assembly stub has more restrictive requirements
 * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DESTINATION_MEMORY_LIMIT can be
 * defined more restrictively in <asm/kexec.h>.
 *
 * The code for the transition from the current kernel to the
 * new kernel is placed in the control_code_buffer, whose size
 * is given by KEXEC_CONTROL_PAGE_SIZE.  In the best case only a single
 * page of memory is necessary, but some architectures require more.
 * Because this memory must be identity mapped in the transition from
 * virtual to physical addresses it must live in the range
 * 0 - TASK_SIZE, as only the user space mappings are arbitrarily
 * modifiable.
 *
 * The assembly stub in the control code buffer is passed a linked list
 * of descriptor pages detailing the source pages of the new kernel,
 * and the destination addresses of those source pages.  As this data
 * structure is not used in the context of the current OS, it must
 * be self-contained.
 *
 * The code has been made to work with highmem pages and will use a
 * destination page in its final resting place (if it happens
 * to allocate it).  The end product of this is that most of the
 * physical address space, and most of RAM, can be used.
 *
 * Future directions include:
 *  - allocating a page table with the control code buffer identity
 *    mapped, to simplify machine_kexec and make kexec_on_panic more
 *    reliable.
 */
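
/*
 * As a rough illustration, the descriptor list handed to the assembly stub
 * is a flat sequence of kimage_entry_t values built by kimage_add_entry()
 * below, along the lines of:
 *
 *   dest_addr | IND_DESTINATION    set the current destination address
 *   src_page  | IND_SOURCE         copy this page to dest_addr, then
 *                                  advance dest_addr by PAGE_SIZE
 *   src_page  | IND_SOURCE         ...
 *   ind_page  | IND_INDIRECTION    continue the list on another page
 *   IND_DONE                       end of the list
 */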

/*
 * KIMAGE_NO_DEST is an impossible destination address, used for
 * allocating pages whose destination address we do not care about.
 */
#define KIMAGE_NO_DEST (-1UL)
#define PAGE_COUNT(x) (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

static struct page *kimage_alloc_page(struct kimage *image,
				      gfp_t gfp_mask,
				      unsigned long dest);

int sanity_check_segment_list(struct kimage *image)
{
	int i;
	unsigned long nr_segments = image->nr_segments;
	unsigned long total_pages = 0;
	unsigned long nr_pages = totalram_pages();

	/*
	 * Verify we have good destination addresses.  The caller is
	 * responsible for making certain we don't attempt to load
	 * the new image into invalid or reserved areas of RAM.  This
	 * just verifies it is an address we can use.
	 *
	 * Since the kernel does everything in page size chunks ensure
	 * the destination addresses are page aligned.  Too many
	 * special cases crop up when we don't do this.  The most
	 * insidious is getting overlapping destination addresses
	 * simply because addresses are changed to page size
	 * granularity.
	 */
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend = mstart + image->segment[i].memsz;
		if (mstart > mend)
			return -EADDRNOTAVAIL;
		if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
			return -EADDRNOTAVAIL;
		if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
			return -EADDRNOTAVAIL;
	}

	/*
	 * Verify our destination addresses do not overlap.
	 * If we allowed overlapping destination addresses
	 * through, very weird things can happen with no
	 * easy explanation as one segment stops on another.
	 */
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;
		unsigned long j;

		mstart = image->segment[i].mem;
		mend = mstart + image->segment[i].memsz;
		for (j = 0; j < i; j++) {
			unsigned long pstart, pend;

			pstart = image->segment[j].mem;
			pend = pstart + image->segment[j].memsz;
			/* Do the segments overlap? */
			if ((mend > pstart) && (mstart < pend))
				return -EINVAL;
		}
	}

	/*
	 * Ensure our buffer sizes do not exceed our memory sizes.
	 * This should always be the case, and it is easier to check
	 * up front than to be surprised later on.
	 */
	for (i = 0; i < nr_segments; i++) {
		if (image->segment[i].bufsz > image->segment[i].memsz)
			return -EINVAL;
	}

	/*
	 * Verify that no more than half of memory will be consumed. If the
	 * request from userspace is too large, a large amount of time will be
	 * wasted allocating pages, which can cause a soft lockup.
	 */
	for (i = 0; i < nr_segments; i++) {
		if (PAGE_COUNT(image->segment[i].memsz) > nr_pages / 2)
			return -EINVAL;

		total_pages += PAGE_COUNT(image->segment[i].memsz);
	}

	if (total_pages > nr_pages / 2)
		return -EINVAL;

#ifdef CONFIG_CRASH_DUMP
	/*
	 * Verify we have good destination addresses.  Normally
	 * the caller is responsible for making certain we don't
	 * attempt to load the new image into invalid or reserved
	 * areas of RAM.  But crash kernels are preloaded into a
	 * reserved area of RAM.  We must ensure the addresses
	 * are in the reserved area, otherwise preloading the
	 * kernel could corrupt things.
	 */

	if (image->type == KEXEC_TYPE_CRASH) {
		for (i = 0; i < nr_segments; i++) {
			unsigned long mstart, mend;

			mstart = image->segment[i].mem;
			mend = mstart + image->segment[i].memsz - 1;
			/* Ensure we are within the crash kernel limits */
			if ((mstart < phys_to_boot_phys(crashk_res.start)) ||
			    (mend > phys_to_boot_phys(crashk_res.end)))
				return -EADDRNOTAVAIL;
		}
	}
#endif

	/*
	 * The destination addresses are searched from system RAM rather than
	 * being allocated from the buddy allocator, so they are not guaranteed
	 * to be accepted by the current kernel.  Accept the destination
	 * addresses before kexec swaps their content with the segments' source
	 * pages to avoid accessing memory before it is accepted.
	 */
	for (i = 0; i < nr_segments; i++)
		accept_memory(image->segment[i].mem, image->segment[i].memsz);

	return 0;
}

struct kimage *do_kimage_alloc_init(void)
{
	struct kimage *image;

	/* Allocate a controlling structure */
	image = kzalloc(sizeof(*image), GFP_KERNEL);
	if (!image)
		return NULL;

	image->entry = &image->head;
	image->last_entry = &image->head;
	image->control_page = ~0; /* By default this does not apply */
	image->type = KEXEC_TYPE_DEFAULT;

	/* Initialize the list of control pages */
	INIT_LIST_HEAD(&image->control_pages);

	/* Initialize the list of destination pages */
	INIT_LIST_HEAD(&image->dest_pages);

	/* Initialize the list of unusable pages */
	INIT_LIST_HEAD(&image->unusable_pages);

#ifdef CONFIG_CRASH_HOTPLUG
	image->hp_action = KEXEC_CRASH_HP_NONE;
	image->elfcorehdr_index = -1;
	image->elfcorehdr_updated = false;
#endif

	return image;
}

int kimage_is_destination_range(struct kimage *image,
				unsigned long start,
				unsigned long end)
{
	unsigned long i;

	for (i = 0; i < image->nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend = mstart + image->segment[i].memsz - 1;
		if ((end >= mstart) && (start <= mend))
			return 1;
	}

	return 0;
}

static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
{
	struct page *pages;

	if (fatal_signal_pending(current))
		return NULL;
	pages = alloc_pages(gfp_mask & ~__GFP_ZERO, order);
	if (pages) {
		unsigned int count, i;

		pages->mapping = NULL;
		set_page_private(pages, order);
		count = 1 << order;
		for (i = 0; i < count; i++)
			SetPageReserved(pages + i);

		arch_kexec_post_alloc_pages(page_address(pages), count,
					    gfp_mask);

		if (gfp_mask & __GFP_ZERO)
			for (i = 0; i < count; i++)
				clear_highpage(pages + i);
	}

	return pages;
}

static void kimage_free_pages(struct page *page)
{
	unsigned int order, count, i;

	order = page_private(page);
	count = 1 << order;

	arch_kexec_pre_free_pages(page_address(page), count);

	for (i = 0; i < count; i++)
		ClearPageReserved(page + i);
	__free_pages(page, order);
}

void kimage_free_page_list(struct list_head *list)
{
	struct page *page, *next;

	list_for_each_entry_safe(page, next, list, lru) {
		list_del(&page->lru);
		kimage_free_pages(page);
	}
}

static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
						      unsigned int order)
{
	/*
	 * Control pages are special, they are the intermediaries
	 * that are needed while we copy the rest of the pages
	 * to their final resting place.  As such they must
	 * not conflict with either the destination addresses
	 * or memory the kernel is already using.
	 *
	 * The only case where we really need more than one of
	 * these is for architectures where we cannot disable
	 * the MMU and must instead generate an identity mapped
	 * page table for all of the memory.
	 *
	 * At worst this runs in O(N) of the image size.
	 */
	struct list_head extra_pages;
	struct page *pages;
	unsigned int count;

	count = 1 << order;
	INIT_LIST_HEAD(&extra_pages);

	/*
	 * Loop while I can allocate a page and the page allocated
	 * is a destination page.
	 */
	do {
		unsigned long pfn, epfn, addr, eaddr;

		pages = kimage_alloc_pages(KEXEC_CONTROL_MEMORY_GFP, order);
		if (!pages)
			break;
		pfn   = page_to_boot_pfn(pages);
		epfn  = pfn + count;
		addr  = pfn << PAGE_SHIFT;
		eaddr = (epfn << PAGE_SHIFT) - 1;
		if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
			      kimage_is_destination_range(image, addr, eaddr)) {
			list_add(&pages->lru, &extra_pages);
			pages = NULL;
		}
	} while (!pages);

	if (pages) {
		/* Remember the allocated page... */
		list_add(&pages->lru, &image->control_pages);

		/*
		 * Because the page is already in its destination
		 * location we will never allocate another page at
		 * that address.  Therefore kimage_alloc_pages
		 * will not return it (again) and we don't need
		 * to give it an entry in image->segment[].
		 */
	}
	/*
	 * Deal with the destination pages I have inadvertently allocated.
	 *
	 * Ideally I would convert multi-page allocations into single
	 * page allocations, and add everything to image->dest_pages.
	 *
	 * For now it is simpler to just free the pages.
	 */
	kimage_free_page_list(&extra_pages);

	return pages;
}

#ifdef CONFIG_CRASH_DUMP
static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
						     unsigned int order)
{
	/*
	 * Control pages are special, they are the intermediaries
	 * that are needed while we copy the rest of the pages
	 * to their final resting place.  As such they must
	 * not conflict with either the destination addresses
	 * or memory the kernel is already using.
	 *
	 * Control pages are also the only pages we must allocate
	 * when loading a crash kernel.  All of the other pages
	 * are specified by the segments and we just memcpy
	 * into them directly.
	 *
	 * The only case where we really need more than one of
	 * these is for architectures where we cannot disable
	 * the MMU and must instead generate an identity mapped
	 * page table for all of the memory.
	 *
	 * Given the low demand this implements a very simple
	 * allocator that finds the first hole of the appropriate
	 * size in the reserved memory region, and allocates all
	 * of the memory up to and including the hole.
	 */
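	/*
	 * For example, with order = 1 the hole size is two pages: the scan
	 * below starts at image->control_page rounded up to that size and
	 * slides the candidate hole past every segment it overlaps, giving
	 * up once it would pass crashk_res.end or
	 * KEXEC_CRASH_CONTROL_MEMORY_LIMIT.
	 */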
	unsigned long hole_start, hole_end, size;
	struct page *pages;

	pages = NULL;
	size = (1 << order) << PAGE_SHIFT;
	hole_start = ALIGN(image->control_page, size);
	hole_end   = hole_start + size - 1;
	while (hole_end <= crashk_res.end) {
		unsigned long i;

		cond_resched();

		if (hole_end > KEXEC_CRASH_CONTROL_MEMORY_LIMIT)
			break;
		/* See if I overlap any of the segments */
		for (i = 0; i < image->nr_segments; i++) {
			unsigned long mstart, mend;

			mstart = image->segment[i].mem;
			mend   = mstart + image->segment[i].memsz - 1;
			if ((hole_end >= mstart) && (hole_start <= mend)) {
				/* Advance the hole to the end of the segment */
				hole_start = ALIGN(mend, size);
				hole_end   = hole_start + size - 1;
				break;
			}
		}
		/* If I don't overlap any segments I have found my hole! */
		if (i == image->nr_segments) {
			pages = pfn_to_page(hole_start >> PAGE_SHIFT);
			image->control_page = hole_end + 1;
			break;
		}
	}

	/* Ensure that these pages are decrypted if SME is enabled. */
	if (pages)
		arch_kexec_post_alloc_pages(page_address(pages), 1 << order, 0);

	return pages;
}
#endif


struct page *kimage_alloc_control_pages(struct kimage *image,
					unsigned int order)
{
	struct page *pages = NULL;

	switch (image->type) {
	case KEXEC_TYPE_DEFAULT:
		pages = kimage_alloc_normal_control_pages(image, order);
		break;
#ifdef CONFIG_CRASH_DUMP
	case KEXEC_TYPE_CRASH:
		pages = kimage_alloc_crash_control_pages(image, order);
		break;
#endif
	}

	return pages;
}

static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
{
	if (*image->entry != 0)
		image->entry++;

	if (image->entry == image->last_entry) {
		kimage_entry_t *ind_page;
		struct page *page;

		page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
		if (!page)
			return -ENOMEM;

		ind_page = page_address(page);
		*image->entry = virt_to_boot_phys(ind_page) | IND_INDIRECTION;
		image->entry = ind_page;
		image->last_entry = ind_page +
				    ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
	}
	*image->entry = entry;
	image->entry++;
	*image->entry = 0;

	return 0;
}

static int kimage_set_destination(struct kimage *image,
				  unsigned long destination)
{
	destination &= PAGE_MASK;

	return kimage_add_entry(image, destination | IND_DESTINATION);
}


static int kimage_add_page(struct kimage *image, unsigned long page)
{
	page &= PAGE_MASK;

	return kimage_add_entry(image, page | IND_SOURCE);
}


static void kimage_free_extra_pages(struct kimage *image)
{
	/* Walk through and free any extra destination pages I may have */
	kimage_free_page_list(&image->dest_pages);

	/* Walk through and free any unusable pages I have cached */
	kimage_free_page_list(&image->unusable_pages);
}

void kimage_terminate(struct kimage *image)
{
	if (*image->entry != 0)
		image->entry++;

	*image->entry = IND_DONE;
}
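
/*
 * for_each_kimage_entry() walks the flat entry list described at the top of
 * this file: it follows IND_INDIRECTION entries to the next descriptor page
 * and stops at IND_DONE (or at a zero entry).
 */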
#define for_each_kimage_entry(image, ptr, entry) \
	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
		ptr = (entry & IND_INDIRECTION) ? \
			boot_phys_to_virt((entry & PAGE_MASK)) : ptr + 1)

static void kimage_free_entry(kimage_entry_t entry)
{
	struct page *page;

	page = boot_pfn_to_page(entry >> PAGE_SHIFT);
	kimage_free_pages(page);
}

static void kimage_free_cma(struct kimage *image)
{
	unsigned long i;

	for (i = 0; i < image->nr_segments; i++) {
		struct page *cma = image->segment_cma[i];
		u32 nr_pages = image->segment[i].memsz >> PAGE_SHIFT;

		if (!cma)
			continue;

		arch_kexec_pre_free_pages(page_address(cma), nr_pages);
		dma_release_from_contiguous(NULL, cma, nr_pages);
		image->segment_cma[i] = NULL;
	}
}

void kimage_free(struct kimage *image)
{
	kimage_entry_t *ptr, entry;
	kimage_entry_t ind = 0;

	if (!image)
		return;

#ifdef CONFIG_CRASH_DUMP
	if (image->vmcoreinfo_data_copy) {
		crash_update_vmcoreinfo_safecopy(NULL);
		vunmap(image->vmcoreinfo_data_copy);
	}
#endif

	kimage_free_extra_pages(image);
	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_INDIRECTION) {
			/* Free the previous indirection page */
			if (ind & IND_INDIRECTION)
				kimage_free_entry(ind);
			/*
			 * Save this indirection page until we are
			 * done with it.
			 */
			ind = entry;
		} else if (entry & IND_SOURCE)
			kimage_free_entry(entry);
	}
	/* Free the final indirection page */
	if (ind & IND_INDIRECTION)
		kimage_free_entry(ind);

	/* Handle any machine specific cleanup */
	machine_kexec_cleanup(image);

	/* Free the kexec control pages... */
	kimage_free_page_list(&image->control_pages);

	/* Free CMA allocations */
	kimage_free_cma(image);

	/*
	 * Free up any temporary buffers allocated.  This might be needed
	 * if an error occurred much later, after buffer allocation.
	 */
	if (image->file_mode)
		kimage_file_post_load_cleanup(image);

	kfree(image);
}

static kimage_entry_t *kimage_dst_used(struct kimage *image,
				       unsigned long page)
{
	kimage_entry_t *ptr, entry;
	unsigned long destination = 0;

	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_DESTINATION)
			destination = entry & PAGE_MASK;
		else if (entry & IND_SOURCE) {
			if (page == destination)
				return ptr;
			destination += PAGE_SIZE;
		}
	}

	return NULL;
}
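
/*
 * A worked example of the invariant maintained below: suppose a freshly
 * allocated source page happens to lie at some other segment's destination
 * address.  If a source page is already queued for that destination
 * (kimage_dst_used() finds its entry), its contents are copied into the new
 * page, the entry is repointed at the new page (which now sits at its own
 * destination), and the displaced page is handed back to the caller instead,
 * provided its placement honors the caller's gfp constraints.  If no source
 * page is queued yet, the new page is parked on image->dest_pages for later
 * reuse.
 */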
static struct page *kimage_alloc_page(struct kimage *image,
				      gfp_t gfp_mask,
				      unsigned long destination)
{
	/*
	 * Here we implement safeguards to ensure that a source page
	 * is not copied to its destination page before the data on
	 * the destination page is no longer useful.
	 *
	 * To do this we maintain the invariant that a source page is
	 * either its own destination page, or it is not a
	 * destination page at all.
	 *
	 * That is slightly stronger than required, but the proof
	 * that no problems will occur is trivial, and the
	 * implementation is simple to verify.
	 *
	 * When allocating all pages normally this algorithm will run
	 * in O(N) time, but in the worst case it will run in O(N^2)
	 * time.  If the runtime is a problem the data structures can
	 * be fixed.
	 */
	struct page *page;
	unsigned long addr;

	/*
	 * Walk through the list of destination pages, and see if I
	 * have a match.
	 */
	list_for_each_entry(page, &image->dest_pages, lru) {
		addr = page_to_boot_pfn(page) << PAGE_SHIFT;
		if (addr == destination) {
			list_del(&page->lru);
			return page;
		}
	}
	page = NULL;
	while (1) {
		kimage_entry_t *old;

		/* Allocate a page, if we run out of memory give up */
		page = kimage_alloc_pages(gfp_mask, 0);
		if (!page)
			return NULL;
		/* If the page cannot be used, file it away */
		if (page_to_boot_pfn(page) >
				(KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
			list_add(&page->lru, &image->unusable_pages);
			continue;
		}
		addr = page_to_boot_pfn(page) << PAGE_SHIFT;

		/* If it is the destination page we want, use it */
		if (addr == destination)
			break;

		/* If the page is not a destination page use it */
		if (!kimage_is_destination_range(image, addr,
						 addr + PAGE_SIZE - 1))
			break;

		/*
		 * I know that the page is someone's destination page.
		 * See if there is already a source page for this
		 * destination page.  And if so swap the source pages.
		 */
		old = kimage_dst_used(image, addr);
		if (old) {
			/* If so move it */
			unsigned long old_addr;
			struct page *old_page;

			old_addr = *old & PAGE_MASK;
			old_page = boot_pfn_to_page(old_addr >> PAGE_SHIFT);
			copy_highpage(page, old_page);
			*old = addr | (*old & ~PAGE_MASK);

			/*
			 * The old page I have found cannot be a
			 * destination page, so return it if its
			 * gfp_flags honor the ones passed in.
			 */
			if (!(gfp_mask & __GFP_HIGHMEM) &&
			    PageHighMem(old_page)) {
				kimage_free_pages(old_page);
				continue;
			}
			page = old_page;
			break;
		}
		/* Place the page on the destination list, to be used later */
		list_add(&page->lru, &image->dest_pages);
	}

	return page;
}

static int kimage_load_cma_segment(struct kimage *image, int idx)
{
	struct kexec_segment *segment = &image->segment[idx];
	struct page *cma = image->segment_cma[idx];
	char *ptr = page_address(cma);
	size_t ubytes, mbytes;
	int result = 0;
	unsigned char __user *buf = NULL;
	unsigned char *kbuf = NULL;

	if (image->file_mode)
		kbuf = segment->kbuf;
	else
		buf = segment->buf;
	ubytes = segment->bufsz;
	mbytes = segment->memsz;

	/* Then copy from source buffer to the CMA one */
	while (mbytes) {
		size_t uchunk, mchunk;

		mchunk = min_t(size_t, mbytes, PAGE_SIZE);
		uchunk = min(ubytes, mchunk);

		if (uchunk) {
			/* For file based kexec, source pages are in kernel memory */
			if (image->file_mode)
				memcpy(ptr, kbuf, uchunk);
			else
				result = copy_from_user(ptr, buf, uchunk);
			ubytes -= uchunk;
			if (image->file_mode)
				kbuf += uchunk;
			else
				buf += uchunk;
		}

		if (result) {
			result = -EFAULT;
			goto out;
		}

		ptr += mchunk;
		mbytes -= mchunk;

		cond_resched();
	}

	/* Clear any remainder */
	memset(ptr, 0, mbytes);

out:
	return result;
}
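
/*
 * Loading a normal (non-CMA, non-crash) segment works one page at a time:
 * record an IND_DESTINATION entry for the segment start, then for each page
 * allocate a source page with kimage_alloc_page(), add an IND_SOURCE entry
 * for it, and fill it from the user buffer (or the kernel buffer for
 * kexec_file_load).  Once bufsz is exhausted the remaining memsz pages are
 * simply left zeroed.
 */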
static int kimage_load_normal_segment(struct kimage *image, int idx)
{
	struct kexec_segment *segment = &image->segment[idx];
	unsigned long maddr;
	size_t ubytes, mbytes;
	int result;
	unsigned char __user *buf = NULL;
	unsigned char *kbuf = NULL;

	if (image->file_mode)
		kbuf = segment->kbuf;
	else
		buf = segment->buf;
	ubytes = segment->bufsz;
	mbytes = segment->memsz;
	maddr = segment->mem;

	if (image->segment_cma[idx])
		return kimage_load_cma_segment(image, idx);

	result = kimage_set_destination(image, maddr);
	if (result < 0)
		goto out;

	while (mbytes) {
		struct page *page;
		char *ptr;
		size_t uchunk, mchunk;

		page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
		if (!page) {
			result = -ENOMEM;
			goto out;
		}
		result = kimage_add_page(image, page_to_boot_pfn(page)
								<< PAGE_SHIFT);
		if (result < 0)
			goto out;

		ptr = kmap_local_page(page);
		/* Start with a clear page */
		clear_page(ptr);
		mchunk = min_t(size_t, mbytes, PAGE_SIZE);
		uchunk = min(ubytes, mchunk);

		if (uchunk) {
			/* For file based kexec, source pages are in kernel memory */
			if (image->file_mode)
				memcpy(ptr, kbuf, uchunk);
			else
				result = copy_from_user(ptr, buf, uchunk);
			ubytes -= uchunk;
			if (image->file_mode)
				kbuf += uchunk;
			else
				buf += uchunk;
		}
		kunmap_local(ptr);
		if (result) {
			result = -EFAULT;
			goto out;
		}
		maddr += mchunk;
		mbytes -= mchunk;

		cond_resched();
	}
out:
	return result;
}

#ifdef CONFIG_CRASH_DUMP
static int kimage_load_crash_segment(struct kimage *image, int idx)
{
	/*
	 * For crash dump kernels we simply copy the data from the
	 * source buffer to its destination.
	 * We do things a page at a time for the sake of kmap.
	 */
	struct kexec_segment *segment = &image->segment[idx];
	unsigned long maddr;
	size_t ubytes, mbytes;
	int result;
	unsigned char __user *buf = NULL;
	unsigned char *kbuf = NULL;

	result = 0;
	if (image->file_mode)
		kbuf = segment->kbuf;
	else
		buf = segment->buf;
	ubytes = segment->bufsz;
	mbytes = segment->memsz;
	maddr = segment->mem;
	while (mbytes) {
		struct page *page;
		char *ptr;
		size_t uchunk, mchunk;

		page = boot_pfn_to_page(maddr >> PAGE_SHIFT);
		if (!page) {
			result = -ENOMEM;
			goto out;
		}
		arch_kexec_post_alloc_pages(page_address(page), 1, 0);
		ptr = kmap_local_page(page);
		mchunk = min_t(size_t, mbytes, PAGE_SIZE);
		uchunk = min(ubytes, mchunk);
		if (mchunk > uchunk) {
			/* Zero the trailing part of the page */
			memset(ptr + uchunk, 0, mchunk - uchunk);
		}

		if (uchunk) {
			/* For file based kexec, source pages are in kernel memory */
			if (image->file_mode)
				memcpy(ptr, kbuf, uchunk);
			else
				result = copy_from_user(ptr, buf, uchunk);
			ubytes -= uchunk;
			if (image->file_mode)
				kbuf += uchunk;
			else
				buf += uchunk;
		}
		kexec_flush_icache_page(page);
		kunmap_local(ptr);
		arch_kexec_pre_free_pages(page_address(page), 1);
		if (result) {
			result = -EFAULT;
			goto out;
		}
		maddr += mchunk;
		mbytes -= mchunk;

		cond_resched();
	}
out:
	return result;
}
#endif

int kimage_load_segment(struct kimage *image, int idx)
{
	int result = -ENOMEM;

	switch (image->type) {
	case KEXEC_TYPE_DEFAULT:
		result = kimage_load_normal_segment(image, idx);
		break;
#ifdef CONFIG_CRASH_DUMP
	case KEXEC_TYPE_CRASH:
		result = kimage_load_crash_segment(image, idx);
		break;
#endif
	}

	return result;
}
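
/*
 * kimage_map_segment() gives the caller a temporary, virtually contiguous
 * view of a loaded segment: it collects the source pages whose destination
 * addresses fall inside [addr, addr + size) and vmap()s them in destination
 * order.  The mapping must be released again with kimage_unmap_segment().
 */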
void *kimage_map_segment(struct kimage *image,
			 unsigned long addr, unsigned long size)
{
	unsigned long src_page_addr, dest_page_addr = 0;
	unsigned long eaddr = addr + size;
	kimage_entry_t *ptr, entry;
	struct page **src_pages;
	unsigned int npages;
	void *vaddr = NULL;
	int i;

	/*
	 * Collect the source pages and map them in a contiguous VA range.
	 */
	npages = PFN_UP(eaddr) - PFN_DOWN(addr);
	src_pages = kmalloc_array(npages, sizeof(*src_pages), GFP_KERNEL);
	if (!src_pages) {
		pr_err("Could not allocate ima pages array.\n");
		return NULL;
	}

	i = 0;
	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_DESTINATION) {
			dest_page_addr = entry & PAGE_MASK;
		} else if (entry & IND_SOURCE) {
			if (dest_page_addr >= addr && dest_page_addr < eaddr) {
				src_page_addr = entry & PAGE_MASK;
				src_pages[i++] =
					virt_to_page(__va(src_page_addr));
				if (i == npages)
					break;
				dest_page_addr += PAGE_SIZE;
			}
		}
	}

	/* Sanity check. */
	WARN_ON(i < npages);

	vaddr = vmap(src_pages, npages, VM_MAP, PAGE_KERNEL);
	kfree(src_pages);

	if (!vaddr)
		pr_err("Could not map ima buffer.\n");

	return vaddr;
}

void kimage_unmap_segment(void *segment_buffer)
{
	vunmap(segment_buffer);
}

struct kexec_load_limit {
	/* Mutex protects the limit count. */
	struct mutex mutex;
	int limit;
};

static struct kexec_load_limit load_limit_reboot = {
	.mutex = __MUTEX_INITIALIZER(load_limit_reboot.mutex),
	.limit = -1,
};

static struct kexec_load_limit load_limit_panic = {
	.mutex = __MUTEX_INITIALIZER(load_limit_panic.mutex),
	.limit = -1,
};

struct kimage *kexec_image;
struct kimage *kexec_crash_image;
static int kexec_load_disabled;

#ifdef CONFIG_SYSCTL
static int kexec_limit_handler(const struct ctl_table *table, int write,
			       void *buffer, size_t *lenp, loff_t *ppos)
{
	struct kexec_load_limit *limit = table->data;
	int val;
	struct ctl_table tmp = {
		.data = &val,
		.maxlen = sizeof(val),
		.mode = table->mode,
	};
	int ret;

	if (write) {
		ret = proc_dointvec(&tmp, write, buffer, lenp, ppos);
		if (ret)
			return ret;

		if (val < 0)
			return -EINVAL;

		mutex_lock(&limit->mutex);
		if (limit->limit != -1 && val >= limit->limit)
			ret = -EINVAL;
		else
			limit->limit = val;
		mutex_unlock(&limit->mutex);

		return ret;
	}

	mutex_lock(&limit->mutex);
	val = limit->limit;
	mutex_unlock(&limit->mutex);

	return proc_dointvec(&tmp, write, buffer, lenp, ppos);
}

static const struct ctl_table kexec_core_sysctls[] = {
	{
		.procname	= "kexec_load_disabled",
		.data		= &kexec_load_disabled,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		/* only handle a transition from default "0" to "1" */
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ONE,
		.extra2		= SYSCTL_ONE,
	},
	{
		.procname	= "kexec_load_limit_panic",
		.data		= &load_limit_panic,
		.mode		= 0644,
		.proc_handler	= kexec_limit_handler,
	},
	{
		.procname	= "kexec_load_limit_reboot",
		.data		= &load_limit_reboot,
		.mode		= 0644,
		.proc_handler	= kexec_limit_handler,
	},
};

static int __init kexec_core_sysctl_init(void)
{
	register_sysctl_init("kernel", kexec_core_sysctls);
	return 0;
}
late_initcall(kexec_core_sysctl_init);
#endif
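
/*
 * For example, writing to the sysctls registered above bounds how many
 * further loads kexec_load_permitted() will allow (assuming procfs is
 * mounted in the usual place):
 *
 *   echo 5 > /proc/sys/kernel/kexec_load_limit_reboot
 *   echo 0 > /proc/sys/kernel/kexec_load_limit_panic
 *
 * A limit of -1 (the default) means unlimited, each permitted load
 * decrements the counter, and an existing limit can only ever be lowered.
 */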
bool kexec_load_permitted(int kexec_image_type)
{
	struct kexec_load_limit *limit;

	/*
	 * Only the superuser may use the kexec syscall, and only if it
	 * has not been disabled.
	 */
	if (!capable(CAP_SYS_BOOT) || kexec_load_disabled)
		return false;

	/* Check the limit counter and decrease it. */
	limit = (kexec_image_type == KEXEC_TYPE_CRASH) ?
		&load_limit_panic : &load_limit_reboot;
	mutex_lock(&limit->mutex);
	if (!limit->limit) {
		mutex_unlock(&limit->mutex);
		return false;
	}
	if (limit->limit != -1)
		limit->limit--;
	mutex_unlock(&limit->mutex);

	return true;
}

/*
 * Move into place and start executing a preloaded standalone
 * executable.  If nothing was preloaded return an error.
 */
int kernel_kexec(void)
{
	int error = 0;

	if (!kexec_trylock())
		return -EBUSY;
	if (!kexec_image) {
		error = -EINVAL;
		goto Unlock;
	}

	error = liveupdate_reboot();
	if (error)
		goto Unlock;

#ifdef CONFIG_KEXEC_JUMP
	if (kexec_image->preserve_context) {
		/*
		 * This flow is analogous to hibernation flows that occur
		 * before creating an image and before jumping from the
		 * restore kernel to the image one, so it uses the same
		 * device callbacks as those two flows.
		 */
		pm_prepare_console();
		error = freeze_processes();
		if (error) {
			error = -EBUSY;
			goto Restore_console;
		}
		console_suspend_all();
		error = dpm_suspend_start(PMSG_FREEZE);
		if (error)
			goto Resume_devices;
		/*
		 * dpm_suspend_end() must be called after dpm_suspend_start()
		 * to complete the transition, like in the hibernation flows
		 * mentioned above.
		 */
		error = dpm_suspend_end(PMSG_FREEZE);
		if (error)
			goto Resume_devices;
		error = suspend_disable_secondary_cpus();
		if (error)
			goto Enable_cpus;
		local_irq_disable();
		error = syscore_suspend();
		if (error)
			goto Enable_irqs;
	} else
#endif
	{
		kexec_in_progress = true;
		kernel_restart_prepare("kexec reboot");
		migrate_to_reboot_cpu();
		syscore_shutdown();

		/*
		 * migrate_to_reboot_cpu() disables CPU hotplug assuming that
		 * no further code needs to use CPU hotplug (which is true in
		 * the reboot case). However, the kexec path depends on using
		 * CPU hotplug again; so re-enable it here.
		 */
		cpu_hotplug_enable();
		pr_notice("Starting new kernel\n");
		machine_shutdown();
	}

	kmsg_dump(KMSG_DUMP_SHUTDOWN);
	machine_kexec(kexec_image);

#ifdef CONFIG_KEXEC_JUMP
	if (kexec_image->preserve_context) {
		/*
		 * This flow is analogous to hibernation flows that occur after
		 * creating an image and after the image kernel has got control
		 * back, and in case the devices have been reset or otherwise
		 * manipulated in the meantime, it uses the device callbacks
		 * used by the latter.
		 */
		syscore_resume();
 Enable_irqs:
		local_irq_enable();
 Enable_cpus:
		suspend_enable_secondary_cpus();
		dpm_resume_start(PMSG_RESTORE);
 Resume_devices:
		dpm_resume_end(PMSG_RESTORE);
		console_resume_all();
		thaw_processes();
 Restore_console:
		pm_restore_console();
	}
#endif

 Unlock:
	kexec_unlock();
	return error;
}