// SPDX-License-Identifier: GPL-2.0-only
/*
 * kexec.c - kexec system call core code.
 * Copyright (C) 2002-2004 Eric Biederman <ebiederm@xmission.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/btf.h>
#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/kexec.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/liveupdate.h>
#include <linux/highmem.h>
#include <linux/syscalls.h>
#include <linux/reboot.h>
#include <linux/ioport.h>
#include <linux/hardirq.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/utsname.h>
#include <linux/numa.h>
#include <linux/suspend.h>
#include <linux/device.h>
#include <linux/freezer.h>
#include <linux/panic_notifier.h>
#include <linux/pm.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/console.h>
#include <linux/vmalloc.h>
#include <linux/swap.h>
#include <linux/syscore_ops.h>
#include <linux/compiler.h>
#include <linux/hugetlb.h>
#include <linux/objtool.h>
#include <linux/kmsg_dump.h>
#include <linux/dma-map-ops.h>
#include <linux/sysfs.h>

#include <asm/page.h>
#include <asm/sections.h>

#include <crypto/hash.h>
#include "kexec_internal.h"

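/*
 * Serializes image load/unload and the kexec reboot itself; taken and
 * released via kexec_trylock()/kexec_unlock() (used in kernel_kexec()
 * below).
 */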
atomic_t __kexec_lock = ATOMIC_INIT(0);

/* Flag to indicate we are going to kexec a new kernel */
bool kexec_in_progress = false;

bool kexec_file_dbg_print;

/*
 * When kexec transitions to the new kernel there is a one-to-one
 * mapping between physical and virtual addresses. On processors
 * where you can disable the MMU this is trivial, and easy. For
 * others it is still a simple predictable page table to setup.
 *
 * In that environment kexec copies the new kernel to its final
 * resting place. This means I can only support memory whose
 * physical address can fit in an unsigned long. In particular
 * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled.
 * If the assembly stub has more restrictive requirements
 * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be
 * defined more restrictively in <asm/kexec.h>.
 *
 * The code for the transition from the current kernel to the
 * new kernel is placed in the control_code_buffer, whose size
 * is given by KEXEC_CONTROL_PAGE_SIZE. In the best case only a single
 * page of memory is necessary, but some architectures require more.
 * Because this memory must be identity mapped in the transition from
 * virtual to physical addresses it must live in the range
 * 0 - TASK_SIZE, as only the user space mappings are arbitrarily
 * modifiable.
 *
 * The assembly stub in the control code buffer is passed a linked list
 * of descriptor pages detailing the source pages of the new kernel,
 * and the destination addresses of those source pages. As this data
 * structure is not used in the context of the current OS, it must
 * be self-contained.
 *
 * The code has been made to work with highmem pages and will use a
 * destination page in its final resting place (if it happens
 * to allocate it). The end product of this is that most of the
 * physical address space, and most of RAM can be used.
 *
 * Future directions include:
 *  - allocating a page table with the control code buffer identity
 *    mapped, to simplify machine_kexec and make kexec_on_panic more
 *    reliable.
 */
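
/*
 * Illustrative sketch only (not code this file runs): roughly what an
 * architecture's relocation stub does with the entry list built by
 * kimage_add_entry() below. The IND_* flags come from <linux/kexec.h>;
 * the real stubs are assembly and run on identity-mapped physical
 * addresses rather than via phys_to_virt().
 *
 *	kimage_entry_t *ptr = &image->head;
 *	unsigned long dest = 0;
 *
 *	for (;;) {
 *		kimage_entry_t entry = *ptr++;
 *
 *		if (entry & IND_DONE)
 *			break;					// end of list
 *		else if (entry & IND_INDIRECTION)
 *			ptr = phys_to_virt(entry & PAGE_MASK);	// next descriptor page
 *		else if (entry & IND_DESTINATION)
 *			dest = entry & PAGE_MASK;		// where following sources land
 *		else if (entry & IND_SOURCE) {
 *			copy_page(phys_to_virt(dest),
 *				  phys_to_virt(entry & PAGE_MASK));
 *			dest += PAGE_SIZE;
 *		}
 *	}
 */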

/*
 * KIMAGE_NO_DEST is an impossible destination address, used when
 * allocating pages whose destination address we do not care about.
 */
#define KIMAGE_NO_DEST (-1UL)
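/*
 * PAGE_COUNT() rounds a byte length up to whole pages: with 4K pages,
 * PAGE_COUNT(1) == 1 and PAGE_COUNT(PAGE_SIZE + 1) == 2.
 */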
#define PAGE_COUNT(x) (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

static struct page *kimage_alloc_page(struct kimage *image,
				       gfp_t gfp_mask,
				       unsigned long dest);

int sanity_check_segment_list(struct kimage *image)
{
	int i;
	unsigned long nr_segments = image->nr_segments;
	unsigned long total_pages = 0;
	unsigned long nr_pages = totalram_pages();

	/*
	 * Verify we have good destination addresses. The caller is
	 * responsible for making certain we don't attempt to load
	 * the new image into invalid or reserved areas of RAM. This
	 * just verifies it is an address we can use.
	 *
	 * Since the kernel does everything in page size chunks ensure
	 * the destination addresses are page aligned. Too many
	 * special cases crop up when we don't do this. The most
	 * insidious is getting overlapping destination addresses
	 * simply because addresses are changed to page size
	 * granularity.
	 */
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend = mstart + image->segment[i].memsz;
		if (mstart > mend)
			return -EADDRNOTAVAIL;
		if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
			return -EADDRNOTAVAIL;
		if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
			return -EADDRNOTAVAIL;
	}

	/* Verify our destination addresses do not overlap.
	 * If we allowed overlapping destination addresses
	 * through, very weird things can happen with no
	 * easy explanation as one segment stops on another.
	 */
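	/*
	 * E.g. segments [0x1000, 0x3000) and [0x2000, 0x4000) share the
	 * page at 0x2000 and fail the (mend > pstart) && (mstart < pend)
	 * test below.
	 */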
	for (i = 0; i < nr_segments; i++) {
		unsigned long mstart, mend;
		unsigned long j;

		mstart = image->segment[i].mem;
		mend = mstart + image->segment[i].memsz;
		for (j = 0; j < i; j++) {
			unsigned long pstart, pend;

			pstart = image->segment[j].mem;
			pend = pstart + image->segment[j].memsz;
			/* Do the segments overlap ? */
			if ((mend > pstart) && (mstart < pend))
				return -EINVAL;
		}
	}

	/* Ensure our buffer sizes are strictly less than
	 * our memory sizes. This should always be the case,
	 * and it is easier to check up front than to be surprised
	 * later on.
	 */
	for (i = 0; i < nr_segments; i++) {
		if (image->segment[i].bufsz > image->segment[i].memsz)
			return -EINVAL;
	}

	/*
	 * Verify that no more than half of memory will be consumed. If the
	 * request from userspace is too large, a large amount of time will be
	 * wasted allocating pages, which can cause a soft lockup.
	 */
	for (i = 0; i < nr_segments; i++) {
		if (PAGE_COUNT(image->segment[i].memsz) > nr_pages / 2)
			return -EINVAL;

		total_pages += PAGE_COUNT(image->segment[i].memsz);
	}

	if (total_pages > nr_pages / 2)
		return -EINVAL;

#ifdef CONFIG_CRASH_DUMP
	/*
	 * Verify we have good destination addresses. Normally
	 * the caller is responsible for making certain we don't
	 * attempt to load the new image into invalid or reserved
	 * areas of RAM. But crash kernels are preloaded into a
	 * reserved area of RAM. We must ensure the addresses
	 * are in the reserved area, otherwise preloading the
	 * kernel could corrupt things.
	 */

	if (image->type == KEXEC_TYPE_CRASH) {
		for (i = 0; i < nr_segments; i++) {
			unsigned long mstart, mend;

			mstart = image->segment[i].mem;
			mend = mstart + image->segment[i].memsz - 1;
			/* Ensure we are within the crash kernel limits */
			if ((mstart < phys_to_boot_phys(crashk_res.start)) ||
			    (mend > phys_to_boot_phys(crashk_res.end)))
				return -EADDRNOTAVAIL;
		}
	}
#endif

	/*
	 * The destination addresses are searched from system RAM rather than
	 * being allocated from the buddy allocator, so they are not guaranteed
	 * to be accepted by the current kernel. Accept the destination
	 * addresses before kexec swaps their content with the segments' source
	 * pages to avoid accessing memory before it is accepted.
	 */
	for (i = 0; i < nr_segments; i++)
		accept_memory(image->segment[i].mem, image->segment[i].memsz);

	return 0;
}

struct kimage *do_kimage_alloc_init(void)
{
	struct kimage *image;

	/* Allocate a controlling structure */
	image = kzalloc(sizeof(*image), GFP_KERNEL);
	if (!image)
		return NULL;

	image->entry = &image->head;
	image->last_entry = &image->head;
	image->control_page = ~0; /* By default this does not apply */
	image->type = KEXEC_TYPE_DEFAULT;

	/* Initialize the list of control pages */
	INIT_LIST_HEAD(&image->control_pages);

	/* Initialize the list of destination pages */
	INIT_LIST_HEAD(&image->dest_pages);

	/* Initialize the list of unusable pages */
	INIT_LIST_HEAD(&image->unusable_pages);

#ifdef CONFIG_CRASH_HOTPLUG
	image->hp_action = KEXEC_CRASH_HP_NONE;
	image->elfcorehdr_index = -1;
	image->elfcorehdr_updated = false;
#endif

	return image;
}

int kimage_is_destination_range(struct kimage *image,
				unsigned long start,
				unsigned long end)
{
	unsigned long i;

	for (i = 0; i < image->nr_segments; i++) {
		unsigned long mstart, mend;

		mstart = image->segment[i].mem;
		mend = mstart + image->segment[i].memsz - 1;
		if ((end >= mstart) && (start <= mend))
			return 1;
	}

	return 0;
}

static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
{
	struct page *pages;

	if (fatal_signal_pending(current))
		return NULL;
	pages = alloc_pages(gfp_mask & ~__GFP_ZERO, order);
	if (pages) {
		unsigned int count, i;

		pages->mapping = NULL;
		set_page_private(pages, order);
		count = 1 << order;
		for (i = 0; i < count; i++)
			SetPageReserved(pages + i);

		arch_kexec_post_alloc_pages(page_address(pages), count,
					    gfp_mask);

		if (gfp_mask & __GFP_ZERO)
			for (i = 0; i < count; i++)
				clear_highpage(pages + i);
	}

	return pages;
}

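/*
 * Note that kimage_alloc_pages() above stashes the allocation order in
 * page_private(), which kimage_free_pages() reads back, so callers never
 * need to remember how large an allocation was.
 */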
static void kimage_free_pages(struct page *page)
{
	unsigned int order, count, i;

	order = page_private(page);
	count = 1 << order;

	arch_kexec_pre_free_pages(page_address(page), count);

	for (i = 0; i < count; i++)
		ClearPageReserved(page + i);
	__free_pages(page, order);
}

void kimage_free_page_list(struct list_head *list)
{
	struct page *page, *next;

	list_for_each_entry_safe(page, next, list, lru) {
		list_del(&page->lru);
		kimage_free_pages(page);
	}
}

static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
						      unsigned int order)
{
	/* Control pages are special, they are the intermediaries
	 * that are needed while we copy the rest of the pages
	 * to their final resting place. As such they must
	 * not conflict with either the destination addresses
	 * or memory the kernel is already using.
	 *
	 * The only case where we really need more than one of
	 * these are for architectures where we cannot disable
	 * the MMU and must instead generate an identity mapped
	 * page table for all of the memory.
	 *
	 * At worst this runs in O(N) of the image size.
	 */
	struct list_head extra_pages;
	struct page *pages;
	unsigned int count;

	count = 1 << order;
	INIT_LIST_HEAD(&extra_pages);

	/* Loop while I can allocate a page and the page allocated
	 * is a destination page.
	 */
	do {
		unsigned long pfn, epfn, addr, eaddr;

		pages = kimage_alloc_pages(KEXEC_CONTROL_MEMORY_GFP, order);
		if (!pages)
			break;
		pfn = page_to_boot_pfn(pages);
		epfn = pfn + count;
		addr = pfn << PAGE_SHIFT;
		eaddr = (epfn << PAGE_SHIFT) - 1;
		if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
		    kimage_is_destination_range(image, addr, eaddr)) {
			list_add(&pages->lru, &extra_pages);
			pages = NULL;
		}
	} while (!pages);

	if (pages) {
		/* Remember the allocated page... */
		list_add(&pages->lru, &image->control_pages);

		/* Because the page is already in its destination
		 * location we will never allocate another page at
		 * that address. Therefore kimage_alloc_pages
		 * will not return it (again) and we don't need
		 * to give it an entry in image->segment[].
		 */
	}
	/* Deal with the destination pages I have inadvertently allocated.
	 *
	 * Ideally I would convert multi-page allocations into single
	 * page allocations, and add everything to image->dest_pages.
	 *
	 * For now it is simpler to just free the pages.
	 */
	kimage_free_page_list(&extra_pages);

	return pages;
}

#ifdef CONFIG_CRASH_DUMP
static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
						     unsigned int order)
{
	/* Control pages are special, they are the intermediaries
	 * that are needed while we copy the rest of the pages
	 * to their final resting place. As such they must
	 * not conflict with either the destination addresses
	 * or memory the kernel is already using.
	 *
	 * Control pages are also the only pages we must allocate
	 * when loading a crash kernel. All of the other pages
	 * are specified by the segments and we just memcpy
	 * into them directly.
	 *
	 * The only case where we really need more than one of
	 * these are for architectures where we cannot disable
	 * the MMU and must instead generate an identity mapped
	 * page table for all of the memory.
	 *
	 * Given the low demand this implements a very simple
	 * allocator that finds the first hole of the appropriate
	 * size in the reserved memory region, and allocates all
	 * of the memory up to and including the hole.
	 */
	unsigned long hole_start, hole_end, size;
	struct page *pages;

	pages = NULL;
	size = (1 << order) << PAGE_SHIFT;
	hole_start = ALIGN(image->control_page, size);
	hole_end = hole_start + size - 1;
	while (hole_end <= crashk_res.end) {
		unsigned long i;

		cond_resched();

		if (hole_end > KEXEC_CRASH_CONTROL_MEMORY_LIMIT)
			break;
		/* See if I overlap any of the segments */
		for (i = 0; i < image->nr_segments; i++) {
			unsigned long mstart, mend;

			mstart = image->segment[i].mem;
			mend = mstart + image->segment[i].memsz - 1;
			if ((hole_end >= mstart) && (hole_start <= mend)) {
				/* Advance the hole to the end of the segment */
				hole_start = ALIGN(mend, size);
				hole_end = hole_start + size - 1;
				break;
			}
		}
		/* If I don't overlap any segments I have found my hole! */
		if (i == image->nr_segments) {
			pages = pfn_to_page(hole_start >> PAGE_SHIFT);
			image->control_page = hole_end + 1;
			break;
		}
	}

	/* Ensure that these pages are decrypted if SME is enabled. */
	if (pages)
		arch_kexec_post_alloc_pages(page_address(pages), 1 << order, 0);

	return pages;
}
#endif


struct page *kimage_alloc_control_pages(struct kimage *image,
					unsigned int order)
{
	struct page *pages = NULL;

	switch (image->type) {
	case KEXEC_TYPE_DEFAULT:
		pages = kimage_alloc_normal_control_pages(image, order);
		break;
#ifdef CONFIG_CRASH_DUMP
	case KEXEC_TYPE_CRASH:
		pages = kimage_alloc_crash_control_pages(image, order);
		break;
#endif
	}

	return pages;
}

static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
{
	if (*image->entry != 0)
		image->entry++;

	if (image->entry == image->last_entry) {
		kimage_entry_t *ind_page;
		struct page *page;

		page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
		if (!page)
			return -ENOMEM;

		ind_page = page_address(page);
		*image->entry = virt_to_boot_phys(ind_page) | IND_INDIRECTION;
		image->entry = ind_page;
		image->last_entry = ind_page +
				((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
	}
	*image->entry = entry;
	image->entry++;
	*image->entry = 0;

	return 0;
}

static int kimage_set_destination(struct kimage *image,
				  unsigned long destination)
{
	destination &= PAGE_MASK;

	return kimage_add_entry(image, destination | IND_DESTINATION);
}


static int kimage_add_page(struct kimage *image, unsigned long page)
{
	page &= PAGE_MASK;

	return kimage_add_entry(image, page | IND_SOURCE);
}


static void kimage_free_extra_pages(struct kimage *image)
{
	/* Walk through and free any extra destination pages I may have */
	kimage_free_page_list(&image->dest_pages);

	/* Walk through and free any unusable pages I have cached */
	kimage_free_page_list(&image->unusable_pages);

}

void kimage_terminate(struct kimage *image)
{
	if (*image->entry != 0)
		image->entry++;

	*image->entry = IND_DONE;
}

#define for_each_kimage_entry(image, ptr, entry) \
	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
		ptr = (entry & IND_INDIRECTION) ? \
			boot_phys_to_virt((entry & PAGE_MASK)) : ptr + 1)
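
/*
 * Typical use of the iterator above (see kimage_free() and
 * kimage_dst_used() below): "ptr" walks the entry slots, following
 * IND_INDIRECTION links transparently, and "entry" holds each value.
 * Sketch:
 *
 *	kimage_entry_t *ptr, entry;
 *
 *	for_each_kimage_entry(image, ptr, entry)
 *		if (entry & IND_SOURCE)
 *			...
 */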

static void kimage_free_entry(kimage_entry_t entry)
{
	struct page *page;

	page = boot_pfn_to_page(entry >> PAGE_SHIFT);
	kimage_free_pages(page);
}

static void kimage_free_cma(struct kimage *image)
{
	unsigned long i;

	for (i = 0; i < image->nr_segments; i++) {
		struct page *cma = image->segment_cma[i];
		u32 nr_pages = image->segment[i].memsz >> PAGE_SHIFT;

		if (!cma)
			continue;

		arch_kexec_pre_free_pages(page_address(cma), nr_pages);
		dma_release_from_contiguous(NULL, cma, nr_pages);
		image->segment_cma[i] = NULL;
	}

}

void kimage_free(struct kimage *image)
{
	kimage_entry_t *ptr, entry;
	kimage_entry_t ind = 0;

	if (!image)
		return;

#ifdef CONFIG_CRASH_DUMP
	if (image->vmcoreinfo_data_copy) {
		crash_update_vmcoreinfo_safecopy(NULL);
		vunmap(image->vmcoreinfo_data_copy);
	}
#endif

	kimage_free_extra_pages(image);
	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_INDIRECTION) {
			/* Free the previous indirection page */
			if (ind & IND_INDIRECTION)
				kimage_free_entry(ind);
			/* Save this indirection page until we are
			 * done with it.
			 */
			ind = entry;
		} else if (entry & IND_SOURCE)
			kimage_free_entry(entry);
	}
	/* Free the final indirection page */
	if (ind & IND_INDIRECTION)
		kimage_free_entry(ind);

	/* Handle any machine specific cleanup */
	machine_kexec_cleanup(image);

	/* Free the kexec control pages... */
	kimage_free_page_list(&image->control_pages);

	/* Free CMA allocations */
	kimage_free_cma(image);

	/*
	 * Free up any temporary buffers allocated. This might hit if an
	 * error occurred much later after buffer allocation.
	 */
	if (image->file_mode)
		kimage_file_post_load_cleanup(image);

	kfree(image);
}

static kimage_entry_t *kimage_dst_used(struct kimage *image,
				       unsigned long page)
{
	kimage_entry_t *ptr, entry;
	unsigned long destination = 0;

	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_DESTINATION)
			destination = entry & PAGE_MASK;
		else if (entry & IND_SOURCE) {
			if (page == destination)
				return ptr;
			destination += PAGE_SIZE;
		}
	}

	return NULL;
}

static struct page *kimage_alloc_page(struct kimage *image,
				      gfp_t gfp_mask,
				      unsigned long destination)
{
	/*
	 * Here we implement safeguards to ensure that a source page
	 * is not copied to its destination page before the data on
	 * the destination page is no longer useful.
	 *
	 * To do this we maintain the invariant that a source page is
	 * either its own destination page, or it is not a
	 * destination page at all.
	 *
	 * That is slightly stronger than required, but the proof
	 * that no problems will occur is trivial, and the
	 * implementation is simple to verify.
	 *
	 * When allocating all pages normally this algorithm will run
	 * in O(N) time, but in the worst case it will run in O(N^2)
	 * time. If the runtime is a problem the data structures can
	 * be fixed.
	 */
	struct page *page;
	unsigned long addr;

	/*
	 * Walk through the list of destination pages, and see if I
	 * have a match.
	 */
	list_for_each_entry(page, &image->dest_pages, lru) {
		addr = page_to_boot_pfn(page) << PAGE_SHIFT;
		if (addr == destination) {
			list_del(&page->lru);
			return page;
		}
	}
	page = NULL;
	while (1) {
		kimage_entry_t *old;

		/* Allocate a page, if we run out of memory give up */
		page = kimage_alloc_pages(gfp_mask, 0);
		if (!page)
			return NULL;
		/* If the page cannot be used file it away */
		if (page_to_boot_pfn(page) >
				(KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
			list_add(&page->lru, &image->unusable_pages);
			continue;
		}
		addr = page_to_boot_pfn(page) << PAGE_SHIFT;

		/* If it is the destination page we want, use it */
		if (addr == destination)
			break;

		/* If the page is not a destination page use it */
		if (!kimage_is_destination_range(image, addr,
						 addr + PAGE_SIZE - 1))
			break;

		/*
		 * I know that the page is someone's destination page.
		 * See if there is already a source page for this
		 * destination page. And if so swap the source pages.
		 */
		old = kimage_dst_used(image, addr);
		if (old) {
			/* If so move it */
			unsigned long old_addr;
			struct page *old_page;

			old_addr = *old & PAGE_MASK;
			old_page = boot_pfn_to_page(old_addr >> PAGE_SHIFT);
			copy_highpage(page, old_page);
			*old = addr | (*old & ~PAGE_MASK);

			/* The old page I have found cannot be a
			 * destination page, so return it if its
			 * gfp_flags honor the ones passed in.
			 */
			if (!(gfp_mask & __GFP_HIGHMEM) &&
			    PageHighMem(old_page)) {
				kimage_free_pages(old_page);
				continue;
			}
			page = old_page;
			break;
		}
		/* Place the page on the destination list, to be used later */
		list_add(&page->lru, &image->dest_pages);
	}

	return page;
}

static int kimage_load_cma_segment(struct kimage *image, int idx)
{
	struct kexec_segment *segment = &image->segment[idx];
	struct page *cma = image->segment_cma[idx];
	char *ptr = page_address(cma);
	size_t ubytes, mbytes;
	int result = 0;
	unsigned char __user *buf = NULL;
	unsigned char *kbuf = NULL;

	if (image->file_mode)
		kbuf = segment->kbuf;
	else
		buf = segment->buf;
	ubytes = segment->bufsz;
	mbytes = segment->memsz;

	/* Then copy from source buffer to the CMA one */
	while (mbytes) {
		size_t uchunk, mchunk;

		mchunk = min_t(size_t, mbytes, PAGE_SIZE);
		uchunk = min(ubytes, mchunk);

		if (uchunk) {
			/* For file based kexec, source pages are in kernel memory */
			if (image->file_mode)
				memcpy(ptr, kbuf, uchunk);
			else
				result = copy_from_user(ptr, buf, uchunk);
			ubytes -= uchunk;
			if (image->file_mode)
				kbuf += uchunk;
			else
				buf += uchunk;
		}

		if (result) {
			result = -EFAULT;
			goto out;
		}

		ptr += mchunk;
		mbytes -= mchunk;

		cond_resched();
	}

	/* Clear any remainder */
	memset(ptr, 0, mbytes);

out:
	return result;
}

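/*
 * For the page-at-a-time loaders below, only bufsz (ubytes) bytes come
 * from the source buffer; each destination page is cleared (or its tail
 * zeroed) first, so the rest of memsz reads back as zeroes and a segment
 * may legitimately be larger than the data loaded into it.
 */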
static int kimage_load_normal_segment(struct kimage *image, int idx)
{
	struct kexec_segment *segment = &image->segment[idx];
	unsigned long maddr;
	size_t ubytes, mbytes;
	int result;
	unsigned char __user *buf = NULL;
	unsigned char *kbuf = NULL;

	if (image->file_mode)
		kbuf = segment->kbuf;
	else
		buf = segment->buf;
	ubytes = segment->bufsz;
	mbytes = segment->memsz;
	maddr = segment->mem;

	if (image->segment_cma[idx])
		return kimage_load_cma_segment(image, idx);

	result = kimage_set_destination(image, maddr);
	if (result < 0)
		goto out;

	while (mbytes) {
		struct page *page;
		char *ptr;
		size_t uchunk, mchunk;

		page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
		if (!page) {
			result = -ENOMEM;
			goto out;
		}
		result = kimage_add_page(image, page_to_boot_pfn(page)
								<< PAGE_SHIFT);
		if (result < 0)
			goto out;

		ptr = kmap_local_page(page);
		/* Start with a clear page */
		clear_page(ptr);
		mchunk = min_t(size_t, mbytes, PAGE_SIZE);
		uchunk = min(ubytes, mchunk);

		if (uchunk) {
			/* For file based kexec, source pages are in kernel memory */
			if (image->file_mode)
				memcpy(ptr, kbuf, uchunk);
			else
				result = copy_from_user(ptr, buf, uchunk);
			ubytes -= uchunk;
			if (image->file_mode)
				kbuf += uchunk;
			else
				buf += uchunk;
		}
		kunmap_local(ptr);
		if (result) {
			result = -EFAULT;
			goto out;
		}
		maddr += mchunk;
		mbytes -= mchunk;

		cond_resched();
	}
out:
	return result;
}

#ifdef CONFIG_CRASH_DUMP
static int kimage_load_crash_segment(struct kimage *image, int idx)
{
	/* For crash dump kernels we simply copy the data from
	 * user space to its destination.
	 * We do things a page at a time for the sake of kmap.
	 */
	struct kexec_segment *segment = &image->segment[idx];
	unsigned long maddr;
	size_t ubytes, mbytes;
	int result;
	unsigned char __user *buf = NULL;
	unsigned char *kbuf = NULL;

	result = 0;
	if (image->file_mode)
		kbuf = segment->kbuf;
	else
		buf = segment->buf;
	ubytes = segment->bufsz;
	mbytes = segment->memsz;
	maddr = segment->mem;
	while (mbytes) {
		struct page *page;
		char *ptr;
		size_t uchunk, mchunk;

		page = boot_pfn_to_page(maddr >> PAGE_SHIFT);
		if (!page) {
			result = -ENOMEM;
			goto out;
		}
		arch_kexec_post_alloc_pages(page_address(page), 1, 0);
		ptr = kmap_local_page(page);
		mchunk = min_t(size_t, mbytes, PAGE_SIZE);
		uchunk = min(ubytes, mchunk);
		if (mchunk > uchunk) {
			/* Zero the trailing part of the page */
			memset(ptr + uchunk, 0, mchunk - uchunk);
		}

		if (uchunk) {
			/* For file based kexec, source pages are in kernel memory */
			if (image->file_mode)
				memcpy(ptr, kbuf, uchunk);
			else
				result = copy_from_user(ptr, buf, uchunk);
			ubytes -= uchunk;
			if (image->file_mode)
				kbuf += uchunk;
			else
				buf += uchunk;
		}
		kexec_flush_icache_page(page);
		kunmap_local(ptr);
		arch_kexec_pre_free_pages(page_address(page), 1);
		if (result) {
			result = -EFAULT;
			goto out;
		}
		maddr += mchunk;
		mbytes -= mchunk;

		cond_resched();
	}
out:
	return result;
}
#endif

int kimage_load_segment(struct kimage *image, int idx)
{
	int result = -ENOMEM;

	switch (image->type) {
	case KEXEC_TYPE_DEFAULT:
		result = kimage_load_normal_segment(image, idx);
		break;
#ifdef CONFIG_CRASH_DUMP
	case KEXEC_TYPE_CRASH:
		result = kimage_load_crash_segment(image, idx);
		break;
#endif
	}

	return result;
}

void *kimage_map_segment(struct kimage *image, int idx)
{
	unsigned long addr, size, eaddr;
	unsigned long src_page_addr, dest_page_addr = 0;
	kimage_entry_t *ptr, entry;
	struct page **src_pages;
	unsigned int npages;
	struct page *cma;
	void *vaddr = NULL;
	int i;

	cma = image->segment_cma[idx];
	if (cma)
		return page_address(cma);

	addr = image->segment[idx].mem;
	size = image->segment[idx].memsz;
	eaddr = addr + size;
	/*
	 * Collect the source pages and map them in a contiguous VA range.
	 */
	npages = PFN_UP(eaddr) - PFN_DOWN(addr);
	src_pages = kmalloc_array(npages, sizeof(*src_pages), GFP_KERNEL);
	if (!src_pages) {
		pr_err("Could not allocate ima pages array.\n");
		return NULL;
	}

	i = 0;
	for_each_kimage_entry(image, ptr, entry) {
		if (entry & IND_DESTINATION) {
			dest_page_addr = entry & PAGE_MASK;
		} else if (entry & IND_SOURCE) {
			if (dest_page_addr >= addr && dest_page_addr < eaddr) {
				src_page_addr = entry & PAGE_MASK;
				src_pages[i++] =
					virt_to_page(__va(src_page_addr));
				if (i == npages)
					break;
				dest_page_addr += PAGE_SIZE;
			}
		}
	}

	/* Sanity check. */
	WARN_ON(i < npages);

	vaddr = vmap(src_pages, npages, VM_MAP, PAGE_KERNEL);
	kfree(src_pages);

	if (!vaddr)
		pr_err("Could not map ima buffer.\n");

	return vaddr;
}

void kimage_unmap_segment(void *segment_buffer)
{
	if (is_vmalloc_addr(segment_buffer))
		vunmap(segment_buffer);
}

struct kexec_load_limit {
	/* Mutex protects the limit count. */
	struct mutex mutex;
	int limit;
};

static struct kexec_load_limit load_limit_reboot = {
	.mutex = __MUTEX_INITIALIZER(load_limit_reboot.mutex),
	.limit = -1,
};

static struct kexec_load_limit load_limit_panic = {
	.mutex = __MUTEX_INITIALIZER(load_limit_panic.mutex),
	.limit = -1,
};
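
/*
 * The two limits above back the kexec_load_limit_reboot and
 * kexec_load_limit_panic sysctls defined below: -1 means "unlimited",
 * each permitted load decrements the counter (see kexec_load_permitted()),
 * and writes may only lower an already-finite limit.
 */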

struct kimage *kexec_image;
struct kimage *kexec_crash_image;
static int kexec_load_disabled;

#ifdef CONFIG_SYSCTL
static int kexec_limit_handler(const struct ctl_table *table, int write,
			       void *buffer, size_t *lenp, loff_t *ppos)
{
	struct kexec_load_limit *limit = table->data;
	int val;
	struct ctl_table tmp = {
		.data = &val,
		.maxlen = sizeof(val),
		.mode = table->mode,
	};
	int ret;

	if (write) {
		ret = proc_dointvec(&tmp, write, buffer, lenp, ppos);
		if (ret)
			return ret;

		if (val < 0)
			return -EINVAL;

		mutex_lock(&limit->mutex);
		if (limit->limit != -1 && val >= limit->limit)
			ret = -EINVAL;
		else
			limit->limit = val;
		mutex_unlock(&limit->mutex);

		return ret;
	}

	mutex_lock(&limit->mutex);
	val = limit->limit;
	mutex_unlock(&limit->mutex);

	return proc_dointvec(&tmp, write, buffer, lenp, ppos);
}

static const struct ctl_table kexec_core_sysctls[] = {
	{
		.procname = "kexec_load_disabled",
		.data = &kexec_load_disabled,
		.maxlen = sizeof(int),
		.mode = 0644,
		/* only handle a transition from default "0" to "1" */
		.proc_handler = proc_dointvec_minmax,
		.extra1 = SYSCTL_ONE,
		.extra2 = SYSCTL_ONE,
	},
	{
		.procname = "kexec_load_limit_panic",
		.data = &load_limit_panic,
		.mode = 0644,
		.proc_handler = kexec_limit_handler,
	},
	{
		.procname = "kexec_load_limit_reboot",
		.data = &load_limit_reboot,
		.mode = 0644,
		.proc_handler = kexec_limit_handler,
	},
};

static int __init kexec_core_sysctl_init(void)
{
	register_sysctl_init("kernel", kexec_core_sysctls);
	return 0;
}
late_initcall(kexec_core_sysctl_init);
#endif

bool kexec_load_permitted(int kexec_image_type)
{
	struct kexec_load_limit *limit;

	/*
	 * Only the superuser may use the kexec syscall, and only if it
	 * has not been disabled.
	 */
	if (!capable(CAP_SYS_BOOT) || kexec_load_disabled)
		return false;

	/* Check the limit counter and decrease it. */
	limit = (kexec_image_type == KEXEC_TYPE_CRASH) ?
		&load_limit_panic : &load_limit_reboot;
	mutex_lock(&limit->mutex);
	if (!limit->limit) {
		mutex_unlock(&limit->mutex);
		return false;
	}
	if (limit->limit != -1)
		limit->limit--;
	mutex_unlock(&limit->mutex);

	return true;
}

/*
 * Move into place and start executing a preloaded standalone
 * executable. If nothing was preloaded return an error.
 */
int kernel_kexec(void)
{
	int error = 0;

	if (!kexec_trylock())
		return -EBUSY;
	if (!kexec_image) {
		error = -EINVAL;
		goto Unlock;
	}

	error = liveupdate_reboot();
	if (error)
		goto Unlock;

#ifdef CONFIG_KEXEC_JUMP
	if (kexec_image->preserve_context) {
		/*
		 * This flow is analogous to hibernation flows that occur
		 * before creating an image and before jumping from the
		 * restore kernel to the image one, so it uses the same
		 * device callbacks as those two flows.
		 */
		pm_prepare_console();
		error = freeze_processes();
		if (error) {
			error = -EBUSY;
			goto Restore_console;
		}
		console_suspend_all();
		error = dpm_suspend_start(PMSG_FREEZE);
		if (error)
			goto Resume_devices;
		/*
		 * dpm_suspend_end() must be called after dpm_suspend_start()
		 * to complete the transition, like in the hibernation flows
		 * mentioned above.
		 */
		error = dpm_suspend_end(PMSG_FREEZE);
		if (error)
			goto Resume_devices;
		error = suspend_disable_secondary_cpus();
		if (error)
			goto Enable_cpus;
		local_irq_disable();
		error = syscore_suspend();
		if (error)
			goto Enable_irqs;
	} else
#endif
	{
		kexec_in_progress = true;
		kernel_restart_prepare("kexec reboot");
		migrate_to_reboot_cpu();
		syscore_shutdown();

		/*
		 * migrate_to_reboot_cpu() disables CPU hotplug assuming that
		 * no further code needs to use CPU hotplug (which is true in
		 * the reboot case). However, the kexec path depends on using
		 * CPU hotplug again; so re-enable it here.
		 */
		cpu_hotplug_enable();
		pr_notice("Starting new kernel\n");
		machine_shutdown();
	}

	kmsg_dump(KMSG_DUMP_SHUTDOWN);
	machine_kexec(kexec_image);

#ifdef CONFIG_KEXEC_JUMP
	if (kexec_image->preserve_context) {
		/*
		 * This flow is analogous to hibernation flows that occur after
		 * creating an image and after the image kernel has got control
		 * back, and in case the devices have been reset or otherwise
		 * manipulated in the meantime, it uses the device callbacks
		 * used by the latter.
		 */
		syscore_resume();
 Enable_irqs:
		local_irq_enable();
 Enable_cpus:
		suspend_enable_secondary_cpus();
		dpm_resume_start(PMSG_RESTORE);
 Resume_devices:
		dpm_resume_end(PMSG_RESTORE);
		console_resume_all();
		thaw_processes();
 Restore_console:
		pm_restore_console();
	}
#endif

 Unlock:
	kexec_unlock();
	return error;
}

static ssize_t loaded_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", !!kexec_image);
}
static struct kobj_attribute loaded_attr = __ATTR_RO(loaded);

#ifdef CONFIG_CRASH_DUMP
static ssize_t crash_loaded_show(struct kobject *kobj,
				 struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", kexec_crash_loaded());
}
static struct kobj_attribute crash_loaded_attr = __ATTR_RO(crash_loaded);

#ifdef CONFIG_CRASH_RESERVE
static ssize_t crash_cma_ranges_show(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	ssize_t len = 0;
	int i;

	for (i = 0; i < crashk_cma_cnt; ++i) {
		len += sysfs_emit_at(buf, len, "%08llx-%08llx\n",
				     crashk_cma_ranges[i].start,
				     crashk_cma_ranges[i].end);
	}
	return len;
}
static struct kobj_attribute crash_cma_ranges_attr = __ATTR_RO(crash_cma_ranges);
#endif

static ssize_t crash_size_show(struct kobject *kobj,
			       struct kobj_attribute *attr, char *buf)
{
	ssize_t size = crash_get_memory_size();

	if (size < 0)
		return size;

	return sysfs_emit(buf, "%zd\n", size);
}
static ssize_t crash_size_store(struct kobject *kobj,
				struct kobj_attribute *attr,
				const char *buf, size_t count)
{
	unsigned long cnt;
	int ret;

	if (kstrtoul(buf, 0, &cnt))
		return -EINVAL;

	ret = crash_shrink_memory(cnt);
	return ret < 0 ? ret : count;
}
static struct kobj_attribute crash_size_attr = __ATTR_RW(crash_size);

#ifdef CONFIG_CRASH_HOTPLUG
static ssize_t crash_elfcorehdr_size_show(struct kobject *kobj,
					  struct kobj_attribute *attr, char *buf)
{
	unsigned int sz = crash_get_elfcorehdr_size();

	return sysfs_emit(buf, "%u\n", sz);
}
static struct kobj_attribute crash_elfcorehdr_size_attr = __ATTR_RO(crash_elfcorehdr_size);

#endif /* CONFIG_CRASH_HOTPLUG */
#endif /* CONFIG_CRASH_DUMP */

static struct attribute *kexec_attrs[] = {
	&loaded_attr.attr,
#ifdef CONFIG_CRASH_DUMP
	&crash_loaded_attr.attr,
	&crash_size_attr.attr,
#ifdef CONFIG_CRASH_RESERVE
	&crash_cma_ranges_attr.attr,
#endif
#ifdef CONFIG_CRASH_HOTPLUG
	&crash_elfcorehdr_size_attr.attr,
#endif
#endif
	NULL
};

struct kexec_link_entry {
	const char *target;
	const char *name;
};
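
/*
 * Compatibility links: each entry below creates a symlink named "name"
 * under /sys/kernel pointing at the "target" attribute in the kexec
 * directory created in init_kexec_sysctl(), so the old flat paths
 * (e.g. /sys/kernel/kexec_loaded) remain available to userspace.
 */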
static struct kexec_link_entry kexec_links[] = {
	{ "loaded", "kexec_loaded" },
#ifdef CONFIG_CRASH_DUMP
	{ "crash_loaded", "kexec_crash_loaded" },
	{ "crash_size", "kexec_crash_size" },
#ifdef CONFIG_CRASH_RESERVE
	{ "crash_cma_ranges", "kexec_crash_cma_ranges" },
#endif
#ifdef CONFIG_CRASH_HOTPLUG
	{ "crash_elfcorehdr_size", "crash_elfcorehdr_size" },
#endif
#endif
};

static struct kobject *kexec_kobj;
ATTRIBUTE_GROUPS(kexec);

static int __init init_kexec_sysctl(void)
{
	int error;
	int i;

	kexec_kobj = kobject_create_and_add("kexec", kernel_kobj);
	if (!kexec_kobj) {
		pr_err("failed to create kexec kobject\n");
		return -ENOMEM;
	}

	error = sysfs_create_groups(kexec_kobj, kexec_groups);
	if (error)
		goto kset_exit;

	for (i = 0; i < ARRAY_SIZE(kexec_links); i++) {
		error = compat_only_sysfs_link_entry_to_kobj(kernel_kobj, kexec_kobj,
							     kexec_links[i].target,
							     kexec_links[i].name);
		if (error)
			pr_err("Unable to create %s symlink (%d)\n", kexec_links[i].name, error);
	}

	return 0;

kset_exit:
	kobject_put(kexec_kobj);
	return error;
}

subsys_initcall(init_kexec_sysctl);