1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * kexec.c - kexec system call core code.
4 * Copyright (C) 2002-2004 Eric Biederman <ebiederm@xmission.com>
5 */
6
7 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
8
9 #include <linux/btf.h>
10 #include <linux/capability.h>
11 #include <linux/mm.h>
12 #include <linux/file.h>
13 #include <linux/slab.h>
14 #include <linux/fs.h>
15 #include <linux/kexec.h>
16 #include <linux/mutex.h>
17 #include <linux/list.h>
18 #include <linux/highmem.h>
19 #include <linux/syscalls.h>
20 #include <linux/reboot.h>
21 #include <linux/ioport.h>
22 #include <linux/hardirq.h>
23 #include <linux/elf.h>
24 #include <linux/elfcore.h>
25 #include <linux/utsname.h>
26 #include <linux/numa.h>
27 #include <linux/suspend.h>
28 #include <linux/device.h>
29 #include <linux/freezer.h>
30 #include <linux/panic_notifier.h>
31 #include <linux/pm.h>
32 #include <linux/cpu.h>
33 #include <linux/uaccess.h>
34 #include <linux/io.h>
35 #include <linux/console.h>
36 #include <linux/vmalloc.h>
37 #include <linux/swap.h>
38 #include <linux/syscore_ops.h>
39 #include <linux/compiler.h>
40 #include <linux/hugetlb.h>
41 #include <linux/objtool.h>
42 #include <linux/kmsg_dump.h>
43 #include <linux/dma-map-ops.h>
44
45 #include <asm/page.h>
46 #include <asm/sections.h>
47
48 #include <crypto/hash.h>
49 #include "kexec_internal.h"
50
51 atomic_t __kexec_lock = ATOMIC_INIT(0);
52
53 /* Flag to indicate we are going to kexec a new kernel */
54 bool kexec_in_progress = false;
55
56 bool kexec_file_dbg_print;
57
58 /*
59 * When kexec transitions to the new kernel there is a one-to-one
60 * mapping between physical and virtual addresses. On processors
61 * where you can disable the MMU this is trivial, and easy. For
62 * others it is still a simple predictable page table to setup.
63 *
64 * In that environment kexec copies the new kernel to its final
65 * resting place. This means I can only support memory whose
66 * physical address can fit in an unsigned long. In particular
67 * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled.
68 * If the assembly stub has more restrictive requirements
69 * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be
70 * defined more restrictively in <asm/kexec.h>.
71 *
72 * The code for the transition from the current kernel to the
73 * new kernel is placed in the control_code_buffer, whose size
74 * is given by KEXEC_CONTROL_PAGE_SIZE. In the best case only a single
75 * page of memory is necessary, but some architectures require more.
76 * Because this memory must be identity mapped in the transition from
77 * virtual to physical addresses it must live in the range
78 * 0 - TASK_SIZE, as only the user space mappings are arbitrarily
79 * modifiable.
80 *
81 * The assembly stub in the control code buffer is passed a linked list
82 * of descriptor pages detailing the source pages of the new kernel,
83 * and the destination addresses of those source pages. As this data
84 * structure is not used in the context of the current OS, it must
85 * be self-contained.
86 *
87 * The code has been made to work with highmem pages and will use a
88 * destination page in its final resting place (if it happens
89 * to allocate it). The end product of this is that most of the
90 * physical address space, and most of RAM can be used.
91 *
92 * Future directions include:
93 * - allocating a page table with the control code buffer identity
94 * mapped, to simplify machine_kexec and make kexec_on_panic more
95 * reliable.
96 */
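/*
 * Illustrative sketch (not part of the build, addresses are made up):
 * the descriptor list handed to the relocation stub is a flat array of
 * kimage_entry_t values, each a page-aligned physical address tagged
 * with a type flag in its low bits.  A tiny two-page image might look
 * roughly like this:
 *
 *	0x10000000 | IND_DESTINATION   set the running destination address
 *	0x3a001000 | IND_SOURCE        copy this page there, dest += PAGE_SIZE
 *	0x3a7f2000 | IND_SOURCE        copy this page to the next destination
 *	0x3b000000 | IND_INDIRECTION   continue reading entries from this page
 *	         0 | IND_DONE          stop and jump to the new kernel
 *
 * kimage_add_entry() below builds this structure.
 */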
97
98 /*
99 * KIMAGE_NO_DEST is an impossible destination address, used for
100 * allocating pages whose destination address we do not care about.
101 */
102 #define KIMAGE_NO_DEST (-1UL)
103 #define PAGE_COUNT(x) (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
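/*
 * For example (assuming 4 KiB pages): PAGE_COUNT(0x1001) == 2, i.e.
 * segment sizes are rounded up to whole pages before being accounted.
 */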
104
105 static struct page *kimage_alloc_page(struct kimage *image,
106 gfp_t gfp_mask,
107 unsigned long dest);
108
109 int sanity_check_segment_list(struct kimage *image)
110 {
111 int i;
112 unsigned long nr_segments = image->nr_segments;
113 unsigned long total_pages = 0;
114 unsigned long nr_pages = totalram_pages();
115
116 /*
117 * Verify we have good destination addresses. The caller is
118 * responsible for making certain we don't attempt to load
119 * the new image into invalid or reserved areas of RAM. This
120 * just verifies it is an address we can use.
121 *
122 * Since the kernel does everything in page size chunks ensure
123 * the destination addresses are page aligned. Too many
124 * special cases crop up when we don't do this. The most
125 * insidious is getting overlapping destination addresses
126 * simply because addresses are changed to page size
127 * granularity.
128 */
129 for (i = 0; i < nr_segments; i++) {
130 unsigned long mstart, mend;
131
132 mstart = image->segment[i].mem;
133 mend = mstart + image->segment[i].memsz;
134 if (mstart > mend)
135 return -EADDRNOTAVAIL;
136 if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
137 return -EADDRNOTAVAIL;
138 if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
139 return -EADDRNOTAVAIL;
140 }
141
142 /* Verify our destination addresses do not overlap.
143 * If we allowed overlapping destination addresses
144 * through, very weird things can happen with no
145 * easy explanation as one segment stomps on another.
146 */
147 for (i = 0; i < nr_segments; i++) {
148 unsigned long mstart, mend;
149 unsigned long j;
150
151 mstart = image->segment[i].mem;
152 mend = mstart + image->segment[i].memsz;
153 for (j = 0; j < i; j++) {
154 unsigned long pstart, pend;
155
156 pstart = image->segment[j].mem;
157 pend = pstart + image->segment[j].memsz;
158 /* Do the segments overlap ? */
159 if ((mend > pstart) && (mstart < pend))
160 return -EINVAL;
161 }
162 }
163
164 /* Ensure our buffer sizes are strictly less than
165 * our memory sizes. This should always be the case,
166 * and it is easier to check up front than to be surprised
167 * later on.
168 */
169 for (i = 0; i < nr_segments; i++) {
170 if (image->segment[i].bufsz > image->segment[i].memsz)
171 return -EINVAL;
172 }
173
174 /*
175 * Verify that no more than half of memory will be consumed. If the
176 * request from userspace is too large, a large amount of time will be
177 * wasted allocating pages, which can cause a soft lockup.
178 */
179 for (i = 0; i < nr_segments; i++) {
180 if (PAGE_COUNT(image->segment[i].memsz) > nr_pages / 2)
181 return -EINVAL;
182
183 total_pages += PAGE_COUNT(image->segment[i].memsz);
184 }
185
186 if (total_pages > nr_pages / 2)
187 return -EINVAL;
188
189 #ifdef CONFIG_CRASH_DUMP
190 /*
191 * Verify we have good destination addresses. Normally
192 * the caller is responsible for making certain we don't
193 * attempt to load the new image into invalid or reserved
194 * areas of RAM. But crash kernels are preloaded into a
195 * reserved area of ram. We must ensure the addresses
196 * are in the reserved area otherwise preloading the
197 * kernel could corrupt things.
198 */
199
200 if (image->type == KEXEC_TYPE_CRASH) {
201 for (i = 0; i < nr_segments; i++) {
202 unsigned long mstart, mend;
203
204 mstart = image->segment[i].mem;
205 mend = mstart + image->segment[i].memsz - 1;
206 /* Ensure we are within the crash kernel limits */
207 if ((mstart < phys_to_boot_phys(crashk_res.start)) ||
208 (mend > phys_to_boot_phys(crashk_res.end)))
209 return -EADDRNOTAVAIL;
210 }
211 }
212 #endif
213
214 /*
215 * The destination addresses are searched from system RAM rather than
216 * being allocated from the buddy allocator, so they are not guaranteed
217 * to be accepted by the current kernel. Accept the destination
218 * addresses before kexec swaps their content with the segments' source
219 * pages to avoid accessing memory before it is accepted.
220 */
221 for (i = 0; i < nr_segments; i++)
222 accept_memory(image->segment[i].mem, image->segment[i].memsz);
223
224 return 0;
225 }
226
227 struct kimage *do_kimage_alloc_init(void)
228 {
229 struct kimage *image;
230
231 /* Allocate a controlling structure */
232 image = kzalloc(sizeof(*image), GFP_KERNEL);
233 if (!image)
234 return NULL;
235
236 image->head = 0;
237 image->entry = &image->head;
238 image->last_entry = &image->head;
239 image->control_page = ~0; /* By default this does not apply */
240 image->type = KEXEC_TYPE_DEFAULT;
241
242 /* Initialize the list of control pages */
243 INIT_LIST_HEAD(&image->control_pages);
244
245 /* Initialize the list of destination pages */
246 INIT_LIST_HEAD(&image->dest_pages);
247
248 /* Initialize the list of unusable pages */
249 INIT_LIST_HEAD(&image->unusable_pages);
250
251 #ifdef CONFIG_CRASH_HOTPLUG
252 image->hp_action = KEXEC_CRASH_HP_NONE;
253 image->elfcorehdr_index = -1;
254 image->elfcorehdr_updated = false;
255 #endif
256
257 return image;
258 }
259
260 int kimage_is_destination_range(struct kimage *image,
261 unsigned long start,
262 unsigned long end)
263 {
264 unsigned long i;
265
266 for (i = 0; i < image->nr_segments; i++) {
267 unsigned long mstart, mend;
268
269 mstart = image->segment[i].mem;
270 mend = mstart + image->segment[i].memsz - 1;
271 if ((end >= mstart) && (start <= mend))
272 return 1;
273 }
274
275 return 0;
276 }
277
278 static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
279 {
280 struct page *pages;
281
282 if (fatal_signal_pending(current))
283 return NULL;
284 pages = alloc_pages(gfp_mask & ~__GFP_ZERO, order);
285 if (pages) {
286 unsigned int count, i;
287
288 pages->mapping = NULL;
289 set_page_private(pages, order);
290 count = 1 << order;
291 for (i = 0; i < count; i++)
292 SetPageReserved(pages + i);
293
294 arch_kexec_post_alloc_pages(page_address(pages), count,
295 gfp_mask);
296
297 if (gfp_mask & __GFP_ZERO)
298 for (i = 0; i < count; i++)
299 clear_highpage(pages + i);
300 }
301
302 return pages;
303 }
304
305 static void kimage_free_pages(struct page *page)
306 {
307 unsigned int order, count, i;
308
309 order = page_private(page);
310 count = 1 << order;
311
312 arch_kexec_pre_free_pages(page_address(page), count);
313
314 for (i = 0; i < count; i++)
315 ClearPageReserved(page + i);
316 __free_pages(page, order);
317 }
318
319 void kimage_free_page_list(struct list_head *list)
320 {
321 struct page *page, *next;
322
323 list_for_each_entry_safe(page, next, list, lru) {
324 list_del(&page->lru);
325 kimage_free_pages(page);
326 }
327 }
328
329 static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
330 unsigned int order)
331 {
332 /* Control pages are special, they are the intermediaries
333 * that are needed while we copy the rest of the pages
334 * to their final resting place. As such they must
335 * not conflict with either the destination addresses
336 * or memory the kernel is already using.
337 *
338 * The only case where we really need more than one of
339 * these is for architectures where we cannot disable
340 * the MMU and must instead generate an identity mapped
341 * page table for all of the memory.
342 *
343 * At worst this runs in O(N) of the image size.
344 */
345 struct list_head extra_pages;
346 struct page *pages;
347 unsigned int count;
348
349 count = 1 << order;
350 INIT_LIST_HEAD(&extra_pages);
351
352 /* Loop while I can allocate a page and the page allocated
353 * is a destination page.
354 */
355 do {
356 unsigned long pfn, epfn, addr, eaddr;
357
358 pages = kimage_alloc_pages(KEXEC_CONTROL_MEMORY_GFP, order);
359 if (!pages)
360 break;
361 pfn = page_to_boot_pfn(pages);
362 epfn = pfn + count;
363 addr = pfn << PAGE_SHIFT;
364 eaddr = (epfn << PAGE_SHIFT) - 1;
365 if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
366 kimage_is_destination_range(image, addr, eaddr)) {
367 list_add(&pages->lru, &extra_pages);
368 pages = NULL;
369 }
370 } while (!pages);
371
372 if (pages) {
373 /* Remember the allocated page... */
374 list_add(&pages->lru, &image->control_pages);
375
376 /* Because the page is already in its destination
377 * location we will never allocate another page at
378 * that address. Therefore kimage_alloc_pages
379 * will not return it (again) and we don't need
380 * to give it an entry in image->segment[].
381 */
382 }
383 /* Deal with the destination pages I have inadvertently allocated.
384 *
385 * Ideally I would convert multi-page allocations into single
386 * page allocations, and add everything to image->dest_pages.
387 *
388 * For now it is simpler to just free the pages.
389 */
390 kimage_free_page_list(&extra_pages);
391
392 return pages;
393 }
394
395 #ifdef CONFIG_CRASH_DUMP
396 static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
397 unsigned int order)
398 {
399 /* Control pages are special, they are the intermediaries
400 * that are needed while we copy the rest of the pages
401 * to their final resting place. As such they must
402 * not conflict with either the destination addresses
403 * or memory the kernel is already using.
404 *
405 * Control pages are also the only pages we must allocate
406 * when loading a crash kernel. All of the other pages
407 * are specified by the segments and we just memcpy
408 * into them directly.
409 *
410 * The only case where we really need more than one of
411 * these is for architectures where we cannot disable
412 * the MMU and must instead generate an identity mapped
413 * page table for all of the memory.
414 *
415 * Given the low demand this implements a very simple
416 * allocator that finds the first hole of the appropriate
417 * size in the reserved memory region, and allocates all
418 * of the memory up to and including the hole.
419 */
420 unsigned long hole_start, hole_end, size;
421 struct page *pages;
422
423 pages = NULL;
424 size = (1 << order) << PAGE_SHIFT;
425 hole_start = ALIGN(image->control_page, size);
426 hole_end = hole_start + size - 1;
427 while (hole_end <= crashk_res.end) {
428 unsigned long i;
429
430 cond_resched();
431
432 if (hole_end > KEXEC_CRASH_CONTROL_MEMORY_LIMIT)
433 break;
434 /* See if I overlap any of the segments */
435 for (i = 0; i < image->nr_segments; i++) {
436 unsigned long mstart, mend;
437
438 mstart = image->segment[i].mem;
439 mend = mstart + image->segment[i].memsz - 1;
440 if ((hole_end >= mstart) && (hole_start <= mend)) {
441 /* Advance the hole to the end of the segment */
442 hole_start = ALIGN(mend, size);
443 hole_end = hole_start + size - 1;
444 break;
445 }
446 }
447 /* If I don't overlap any segments I have found my hole! */
448 if (i == image->nr_segments) {
449 pages = pfn_to_page(hole_start >> PAGE_SHIFT);
450 image->control_page = hole_end + 1;
451 break;
452 }
453 }
454
455 /* Ensure that these pages are decrypted if SME is enabled. */
456 if (pages)
457 arch_kexec_post_alloc_pages(page_address(pages), 1 << order, 0);
458
459 return pages;
460 }
461 #endif
462
463
464 struct page *kimage_alloc_control_pages(struct kimage *image,
465 unsigned int order)
466 {
467 struct page *pages = NULL;
468
469 switch (image->type) {
470 case KEXEC_TYPE_DEFAULT:
471 pages = kimage_alloc_normal_control_pages(image, order);
472 break;
473 #ifdef CONFIG_CRASH_DUMP
474 case KEXEC_TYPE_CRASH:
475 pages = kimage_alloc_crash_control_pages(image, order);
476 break;
477 #endif
478 }
479
480 return pages;
481 }
482
483 static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
484 {
485 if (*image->entry != 0)
486 image->entry++;
487
488 if (image->entry == image->last_entry) {
489 kimage_entry_t *ind_page;
490 struct page *page;
491
492 page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
493 if (!page)
494 return -ENOMEM;
495
496 ind_page = page_address(page);
497 *image->entry = virt_to_boot_phys(ind_page) | IND_INDIRECTION;
498 image->entry = ind_page;
499 image->last_entry = ind_page +
500 ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
501 }
502 *image->entry = entry;
503 image->entry++;
504 *image->entry = 0;
505
506 return 0;
507 }
508
509 static int kimage_set_destination(struct kimage *image,
510 unsigned long destination)
511 {
512 destination &= PAGE_MASK;
513
514 return kimage_add_entry(image, destination | IND_DESTINATION);
515 }
516
517
518 static int kimage_add_page(struct kimage *image, unsigned long page)
519 {
520 page &= PAGE_MASK;
521
522 return kimage_add_entry(image, page | IND_SOURCE);
523 }
524
525
526 static void kimage_free_extra_pages(struct kimage *image)
527 {
528 /* Walk through and free any extra destination pages I may have */
529 kimage_free_page_list(&image->dest_pages);
530
531 /* Walk through and free any unusable pages I have cached */
532 kimage_free_page_list(&image->unusable_pages);
533
534 }
535
536 void kimage_terminate(struct kimage *image)
537 {
538 if (*image->entry != 0)
539 image->entry++;
540
541 *image->entry = IND_DONE;
542 }
543
544 #define for_each_kimage_entry(image, ptr, entry) \
545 for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
546 ptr = (entry & IND_INDIRECTION) ? \
547 boot_phys_to_virt((entry & PAGE_MASK)) : ptr + 1)
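/*
 * Usage sketch (hypothetical helper, not part of this file): the iterator
 * above transparently follows IND_INDIRECTION links and stops at IND_DONE
 * or a zero entry, so walking every recorded source page is simply:
 *
 *	static unsigned long kimage_count_source_pages(struct kimage *image)
 *	{
 *		kimage_entry_t *ptr, entry;
 *		unsigned long nr = 0;
 *
 *		for_each_kimage_entry(image, ptr, entry)
 *			if (entry & IND_SOURCE)
 *				nr++;
 *		return nr;
 *	}
 */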
548
549 static void kimage_free_entry(kimage_entry_t entry)
550 {
551 struct page *page;
552
553 page = boot_pfn_to_page(entry >> PAGE_SHIFT);
554 kimage_free_pages(page);
555 }
556
557 static void kimage_free_cma(struct kimage *image)
558 {
559 unsigned long i;
560
561 for (i = 0; i < image->nr_segments; i++) {
562 struct page *cma = image->segment_cma[i];
563 u32 nr_pages = image->segment[i].memsz >> PAGE_SHIFT;
564
565 if (!cma)
566 continue;
567
568 arch_kexec_pre_free_pages(page_address(cma), nr_pages);
569 dma_release_from_contiguous(NULL, cma, nr_pages);
570 image->segment_cma[i] = NULL;
571 }
572
573 }
574
575 void kimage_free(struct kimage *image)
576 {
577 kimage_entry_t *ptr, entry;
578 kimage_entry_t ind = 0;
579
580 if (!image)
581 return;
582
583 #ifdef CONFIG_CRASH_DUMP
584 if (image->vmcoreinfo_data_copy) {
585 crash_update_vmcoreinfo_safecopy(NULL);
586 vunmap(image->vmcoreinfo_data_copy);
587 }
588 #endif
589
590 kimage_free_extra_pages(image);
591 for_each_kimage_entry(image, ptr, entry) {
592 if (entry & IND_INDIRECTION) {
593 /* Free the previous indirection page */
594 if (ind & IND_INDIRECTION)
595 kimage_free_entry(ind);
596 /* Save this indirection page until we are
597 * done with it.
598 */
599 ind = entry;
600 } else if (entry & IND_SOURCE)
601 kimage_free_entry(entry);
602 }
603 /* Free the final indirection page */
604 if (ind & IND_INDIRECTION)
605 kimage_free_entry(ind);
606
607 /* Handle any machine specific cleanup */
608 machine_kexec_cleanup(image);
609
610 /* Free the kexec control pages... */
611 kimage_free_page_list(&image->control_pages);
612
613 /* Free CMA allocations */
614 kimage_free_cma(image);
615
616 /*
617 * Free up any temporary buffers allocated. This might hit if an
618 * error occurred much later, after buffer allocation.
619 */
620 if (image->file_mode)
621 kimage_file_post_load_cleanup(image);
622
623 kfree(image);
624 }
625
626 static kimage_entry_t *kimage_dst_used(struct kimage *image,
627 unsigned long page)
628 {
629 kimage_entry_t *ptr, entry;
630 unsigned long destination = 0;
631
632 for_each_kimage_entry(image, ptr, entry) {
633 if (entry & IND_DESTINATION)
634 destination = entry & PAGE_MASK;
635 else if (entry & IND_SOURCE) {
636 if (page == destination)
637 return ptr;
638 destination += PAGE_SIZE;
639 }
640 }
641
642 return NULL;
643 }
644
645 static struct page *kimage_alloc_page(struct kimage *image,
646 gfp_t gfp_mask,
647 unsigned long destination)
648 {
649 /*
650 * Here we implement safeguards to ensure that a source page
651 * is not copied to its destination page before the data on
652 * the destination page is no longer useful.
653 *
654 * To do this we maintain the invariant that a source page is
655 * either its own destination page, or it is not a
656 * destination page at all.
657 *
658 * That is slightly stronger than required, but the proof
659 * that no problems will occur is trivial, and the
660 * implementation is simply to verify.
661 *
662 * When allocating all pages normally this algorithm will run
663 * in O(N) time, but in the worst case it will run in O(N^2)
664 * time. If the runtime is a problem the data structures can
665 * be fixed.
666 */
667 struct page *page;
668 unsigned long addr;
669
670 /*
671 * Walk through the list of destination pages, and see if I
672 * have a match.
673 */
674 list_for_each_entry(page, &image->dest_pages, lru) {
675 addr = page_to_boot_pfn(page) << PAGE_SHIFT;
676 if (addr == destination) {
677 list_del(&page->lru);
678 return page;
679 }
680 }
681 page = NULL;
682 while (1) {
683 kimage_entry_t *old;
684
685 /* Allocate a page, if we run out of memory give up */
686 page = kimage_alloc_pages(gfp_mask, 0);
687 if (!page)
688 return NULL;
689 /* If the page cannot be used, file it away */
690 if (page_to_boot_pfn(page) >
691 (KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
692 list_add(&page->lru, &image->unusable_pages);
693 continue;
694 }
695 addr = page_to_boot_pfn(page) << PAGE_SHIFT;
696
697 /* If it is the destination page we want, use it */
698 if (addr == destination)
699 break;
700
701 /* If the page is not a destination page use it */
702 if (!kimage_is_destination_range(image, addr,
703 addr + PAGE_SIZE - 1))
704 break;
705
706 /*
707 * I know that the page is someone's destination page.
708 * See if there is already a source page for this
709 * destination page. And if so swap the source pages.
710 */
711 old = kimage_dst_used(image, addr);
712 if (old) {
713 /* If so move it */
714 unsigned long old_addr;
715 struct page *old_page;
716
717 old_addr = *old & PAGE_MASK;
718 old_page = boot_pfn_to_page(old_addr >> PAGE_SHIFT);
719 copy_highpage(page, old_page);
720 *old = addr | (*old & ~PAGE_MASK);
721
722 /* The old page I have found cannot be a
723 * destination page, so return it if its
724 * gfp_flags honor the ones passed in.
725 */
726 if (!(gfp_mask & __GFP_HIGHMEM) &&
727 PageHighMem(old_page)) {
728 kimage_free_pages(old_page);
729 continue;
730 }
731 page = old_page;
732 break;
733 }
734 /* Place the page on the destination list, to be used later */
735 list_add(&page->lru, &image->dest_pages);
736 }
737
738 return page;
739 }
740
741 static int kimage_load_cma_segment(struct kimage *image, int idx)
742 {
743 struct kexec_segment *segment = &image->segment[idx];
744 struct page *cma = image->segment_cma[idx];
745 char *ptr = page_address(cma);
746 unsigned long maddr;
747 size_t ubytes, mbytes;
748 int result = 0;
749 unsigned char __user *buf = NULL;
750 unsigned char *kbuf = NULL;
751
752 if (image->file_mode)
753 kbuf = segment->kbuf;
754 else
755 buf = segment->buf;
756 ubytes = segment->bufsz;
757 mbytes = segment->memsz;
758 maddr = segment->mem;
759
760 /* Then copy from source buffer to the CMA one */
761 while (mbytes) {
762 size_t uchunk, mchunk;
763
764 ptr += maddr & ~PAGE_MASK;
765 mchunk = min_t(size_t, mbytes,
766 PAGE_SIZE - (maddr & ~PAGE_MASK));
767 uchunk = min(ubytes, mchunk);
768
769 if (uchunk) {
770 /* For file based kexec, source pages are in kernel memory */
771 if (image->file_mode)
772 memcpy(ptr, kbuf, uchunk);
773 else
774 result = copy_from_user(ptr, buf, uchunk);
775 ubytes -= uchunk;
776 if (image->file_mode)
777 kbuf += uchunk;
778 else
779 buf += uchunk;
780 }
781
782 if (result) {
783 result = -EFAULT;
784 goto out;
785 }
786
787 ptr += mchunk;
788 maddr += mchunk;
789 mbytes -= mchunk;
790
791 cond_resched();
792 }
793
794 /* Clear any remainder */
795 memset(ptr, 0, mbytes);
796
797 out:
798 return result;
799 }
800
801 static int kimage_load_normal_segment(struct kimage *image, int idx)
802 {
803 struct kexec_segment *segment = &image->segment[idx];
804 unsigned long maddr;
805 size_t ubytes, mbytes;
806 int result;
807 unsigned char __user *buf = NULL;
808 unsigned char *kbuf = NULL;
809
810 if (image->file_mode)
811 kbuf = segment->kbuf;
812 else
813 buf = segment->buf;
814 ubytes = segment->bufsz;
815 mbytes = segment->memsz;
816 maddr = segment->mem;
817
818 if (image->segment_cma[idx])
819 return kimage_load_cma_segment(image, idx);
820
821 result = kimage_set_destination(image, maddr);
822 if (result < 0)
823 goto out;
824
825 while (mbytes) {
826 struct page *page;
827 char *ptr;
828 size_t uchunk, mchunk;
829
830 page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
831 if (!page) {
832 result = -ENOMEM;
833 goto out;
834 }
835 result = kimage_add_page(image, page_to_boot_pfn(page)
836 << PAGE_SHIFT);
837 if (result < 0)
838 goto out;
839
840 ptr = kmap_local_page(page);
841 /* Start with a clear page */
842 clear_page(ptr);
843 ptr += maddr & ~PAGE_MASK;
844 mchunk = min_t(size_t, mbytes,
845 PAGE_SIZE - (maddr & ~PAGE_MASK));
846 uchunk = min(ubytes, mchunk);
847
848 if (uchunk) {
849 /* For file based kexec, source pages are in kernel memory */
850 if (image->file_mode)
851 memcpy(ptr, kbuf, uchunk);
852 else
853 result = copy_from_user(ptr, buf, uchunk);
854 ubytes -= uchunk;
855 if (image->file_mode)
856 kbuf += uchunk;
857 else
858 buf += uchunk;
859 }
860 kunmap_local(ptr);
861 if (result) {
862 result = -EFAULT;
863 goto out;
864 }
865 maddr += mchunk;
866 mbytes -= mchunk;
867
868 cond_resched();
869 }
870 out:
871 return result;
872 }
873
874 #ifdef CONFIG_CRASH_DUMP
875 static int kimage_load_crash_segment(struct kimage *image, int idx)
876 {
877 /* For crash dump kernels we simply copy the data from
878 * user space to its destination.
879 * We do things a page at a time for the sake of kmap.
880 */
881 struct kexec_segment *segment = &image->segment[idx];
882 unsigned long maddr;
883 size_t ubytes, mbytes;
884 int result;
885 unsigned char __user *buf = NULL;
886 unsigned char *kbuf = NULL;
887
888 result = 0;
889 if (image->file_mode)
890 kbuf = segment->kbuf;
891 else
892 buf = segment->buf;
893 ubytes = segment->bufsz;
894 mbytes = segment->memsz;
895 maddr = segment->mem;
896 while (mbytes) {
897 struct page *page;
898 char *ptr;
899 size_t uchunk, mchunk;
900
901 page = boot_pfn_to_page(maddr >> PAGE_SHIFT);
902 if (!page) {
903 result = -ENOMEM;
904 goto out;
905 }
906 arch_kexec_post_alloc_pages(page_address(page), 1, 0);
907 ptr = kmap_local_page(page);
908 ptr += maddr & ~PAGE_MASK;
909 mchunk = min_t(size_t, mbytes,
910 PAGE_SIZE - (maddr & ~PAGE_MASK));
911 uchunk = min(ubytes, mchunk);
912 if (mchunk > uchunk) {
913 /* Zero the trailing part of the page */
914 memset(ptr + uchunk, 0, mchunk - uchunk);
915 }
916
917 if (uchunk) {
918 /* For file based kexec, source pages are in kernel memory */
919 if (image->file_mode)
920 memcpy(ptr, kbuf, uchunk);
921 else
922 result = copy_from_user(ptr, buf, uchunk);
923 ubytes -= uchunk;
924 if (image->file_mode)
925 kbuf += uchunk;
926 else
927 buf += uchunk;
928 }
929 kexec_flush_icache_page(page);
930 kunmap_local(ptr);
931 arch_kexec_pre_free_pages(page_address(page), 1);
932 if (result) {
933 result = -EFAULT;
934 goto out;
935 }
936 maddr += mchunk;
937 mbytes -= mchunk;
938
939 cond_resched();
940 }
941 out:
942 return result;
943 }
944 #endif
945
946 int kimage_load_segment(struct kimage *image, int idx)
947 {
948 int result = -ENOMEM;
949
950 switch (image->type) {
951 case KEXEC_TYPE_DEFAULT:
952 result = kimage_load_normal_segment(image, idx);
953 break;
954 #ifdef CONFIG_CRASH_DUMP
955 case KEXEC_TYPE_CRASH:
956 result = kimage_load_crash_segment(image, idx);
957 break;
958 #endif
959 }
960
961 return result;
962 }
963
964 void *kimage_map_segment(struct kimage *image,
965 unsigned long addr, unsigned long size)
966 {
967 unsigned long src_page_addr, dest_page_addr = 0;
968 unsigned long eaddr = addr + size;
969 kimage_entry_t *ptr, entry;
970 struct page **src_pages;
971 unsigned int npages;
972 void *vaddr = NULL;
973 int i;
974
975 /*
976 * Collect the source pages and map them in a contiguous VA range.
977 */
978 npages = PFN_UP(eaddr) - PFN_DOWN(addr);
979 src_pages = kmalloc_array(npages, sizeof(*src_pages), GFP_KERNEL);
980 if (!src_pages) {
981 pr_err("Could not allocate ima pages array.\n");
982 return NULL;
983 }
984
985 i = 0;
986 for_each_kimage_entry(image, ptr, entry) {
987 if (entry & IND_DESTINATION) {
988 dest_page_addr = entry & PAGE_MASK;
989 } else if (entry & IND_SOURCE) {
990 if (dest_page_addr >= addr && dest_page_addr < eaddr) {
991 src_page_addr = entry & PAGE_MASK;
992 src_pages[i++] =
993 virt_to_page(__va(src_page_addr));
994 if (i == npages)
995 break;
996 dest_page_addr += PAGE_SIZE;
997 }
998 }
999 }
1000
1001 /* Sanity check. */
1002 WARN_ON(i < npages);
1003
1004 vaddr = vmap(src_pages, npages, VM_MAP, PAGE_KERNEL);
1005 kfree(src_pages);
1006
1007 if (!vaddr)
1008 pr_err("Could not map ima buffer.\n");
1009
1010 return vaddr;
1011 }
1012
1013 void kimage_unmap_segment(void *segment_buffer)
1014 {
1015 vunmap(segment_buffer);
1016 }
1017
1018 struct kexec_load_limit {
1019 /* Mutex protects the limit count. */
1020 struct mutex mutex;
1021 int limit;
1022 };
1023
1024 static struct kexec_load_limit load_limit_reboot = {
1025 .mutex = __MUTEX_INITIALIZER(load_limit_reboot.mutex),
1026 .limit = -1,
1027 };
1028
1029 static struct kexec_load_limit load_limit_panic = {
1030 .mutex = __MUTEX_INITIALIZER(load_limit_panic.mutex),
1031 .limit = -1,
1032 };
1033
1034 struct kimage *kexec_image;
1035 struct kimage *kexec_crash_image;
1036 static int kexec_load_disabled;
1037
1038 #ifdef CONFIG_SYSCTL
1039 static int kexec_limit_handler(const struct ctl_table *table, int write,
1040 void *buffer, size_t *lenp, loff_t *ppos)
1041 {
1042 struct kexec_load_limit *limit = table->data;
1043 int val;
1044 struct ctl_table tmp = {
1045 .data = &val,
1046 .maxlen = sizeof(val),
1047 .mode = table->mode,
1048 };
1049 int ret;
1050
1051 if (write) {
1052 ret = proc_dointvec(&tmp, write, buffer, lenp, ppos);
1053 if (ret)
1054 return ret;
1055
1056 if (val < 0)
1057 return -EINVAL;
1058
1059 mutex_lock(&limit->mutex);
1060 if (limit->limit != -1 && val >= limit->limit)
1061 ret = -EINVAL;
1062 else
1063 limit->limit = val;
1064 mutex_unlock(&limit->mutex);
1065
1066 return ret;
1067 }
1068
1069 mutex_lock(&limit->mutex);
1070 val = limit->limit;
1071 mutex_unlock(&limit->mutex);
1072
1073 return proc_dointvec(&tmp, write, buffer, lenp, ppos);
1074 }
1075
1076 static const struct ctl_table kexec_core_sysctls[] = {
1077 {
1078 .procname = "kexec_load_disabled",
1079 .data = &kexec_load_disabled,
1080 .maxlen = sizeof(int),
1081 .mode = 0644,
1082 /* only handle a transition from default "0" to "1" */
1083 .proc_handler = proc_dointvec_minmax,
1084 .extra1 = SYSCTL_ONE,
1085 .extra2 = SYSCTL_ONE,
1086 },
1087 {
1088 .procname = "kexec_load_limit_panic",
1089 .data = &load_limit_panic,
1090 .mode = 0644,
1091 .proc_handler = kexec_limit_handler,
1092 },
1093 {
1094 .procname = "kexec_load_limit_reboot",
1095 .data = &load_limit_reboot,
1096 .mode = 0644,
1097 .proc_handler = kexec_limit_handler,
1098 },
1099 };
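/*
 * Administration sketch (illustrative values): the limits above appear
 * under /proc/sys/kernel/.  Allowing exactly one further panic-kernel
 * load and no further reboot-kernel loads would be:
 *
 *	echo 1 > /proc/sys/kernel/kexec_load_limit_panic
 *	echo 0 > /proc/sys/kernel/kexec_load_limit_reboot
 *
 * A limit of -1 (the default) means "unlimited"; once a finite limit is
 * set it can only be lowered, and each permitted load decrements it (see
 * kexec_limit_handler() and kexec_load_permitted()).
 */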
1100
1101 static int __init kexec_core_sysctl_init(void)
1102 {
1103 register_sysctl_init("kernel", kexec_core_sysctls);
1104 return 0;
1105 }
1106 late_initcall(kexec_core_sysctl_init);
1107 #endif
1108
1109 bool kexec_load_permitted(int kexec_image_type)
1110 {
1111 struct kexec_load_limit *limit;
1112
1113 /*
1114 * Only the superuser can use the kexec syscall, and only if it has
1115 * not been disabled.
1116 */
1117 if (!capable(CAP_SYS_BOOT) || kexec_load_disabled)
1118 return false;
1119
1120 /* Check the limit counter and decrease it. */
1121 limit = (kexec_image_type == KEXEC_TYPE_CRASH) ?
1122 &load_limit_panic : &load_limit_reboot;
1123 mutex_lock(&limit->mutex);
1124 if (!limit->limit) {
1125 mutex_unlock(&limit->mutex);
1126 return false;
1127 }
1128 if (limit->limit != -1)
1129 limit->limit--;
1130 mutex_unlock(&limit->mutex);
1131
1132 return true;
1133 }
1134
1135 /*
1136 * Move into place and start executing a preloaded standalone
1137 * executable. If nothing was preloaded return an error.
1138 */
1139 int kernel_kexec(void)
1140 {
1141 int error = 0;
1142
1143 if (!kexec_trylock())
1144 return -EBUSY;
1145 if (!kexec_image) {
1146 error = -EINVAL;
1147 goto Unlock;
1148 }
1149
1150 #ifdef CONFIG_KEXEC_JUMP
1151 if (kexec_image->preserve_context) {
1152 /*
1153 * This flow is analogous to hibernation flows that occur
1154 * before creating an image and before jumping from the
1155 * restore kernel to the image one, so it uses the same
1156 * device callbacks as those two flows.
1157 */
1158 pm_prepare_console();
1159 error = freeze_processes();
1160 if (error) {
1161 error = -EBUSY;
1162 goto Restore_console;
1163 }
1164 console_suspend_all();
1165 error = dpm_suspend_start(PMSG_FREEZE);
1166 if (error)
1167 goto Resume_devices;
1168 /*
1169 * dpm_suspend_end() must be called after dpm_suspend_start()
1170 * to complete the transition, like in the hibernation flows
1171 * mentioned above.
1172 */
1173 error = dpm_suspend_end(PMSG_FREEZE);
1174 if (error)
1175 goto Resume_devices;
1176 error = suspend_disable_secondary_cpus();
1177 if (error)
1178 goto Enable_cpus;
1179 local_irq_disable();
1180 error = syscore_suspend();
1181 if (error)
1182 goto Enable_irqs;
1183 } else
1184 #endif
1185 {
1186 kexec_in_progress = true;
1187 kernel_restart_prepare("kexec reboot");
1188 migrate_to_reboot_cpu();
1189 syscore_shutdown();
1190
1191 /*
1192 * migrate_to_reboot_cpu() disables CPU hotplug assuming that
1193 * no further code needs to use CPU hotplug (which is true in
1194 * the reboot case). However, the kexec path depends on using
1195 * CPU hotplug again; so re-enable it here.
1196 */
1197 cpu_hotplug_enable();
1198 pr_notice("Starting new kernel\n");
1199 machine_shutdown();
1200 }
1201
1202 kmsg_dump(KMSG_DUMP_SHUTDOWN);
1203 machine_kexec(kexec_image);
1204
1205 #ifdef CONFIG_KEXEC_JUMP
1206 if (kexec_image->preserve_context) {
1207 /*
1208 * This flow is analogous to hibernation flows that occur after
1209 * creating an image and after the image kernel has got control
1210 * back, and in case the devices have been reset or otherwise
1211 * manipulated in the meantime, it uses the device callbacks
1212 * used by the latter.
1213 */
1214 syscore_resume();
1215 Enable_irqs:
1216 local_irq_enable();
1217 Enable_cpus:
1218 suspend_enable_secondary_cpus();
1219 dpm_resume_start(PMSG_RESTORE);
1220 Resume_devices:
1221 dpm_resume_end(PMSG_RESTORE);
1222 console_resume_all();
1223 thaw_processes();
1224 Restore_console:
1225 pm_restore_console();
1226 }
1227 #endif
1228
1229 Unlock:
1230 kexec_unlock();
1231 return error;
1232 }
1233