xref: /linux/kernel/kexec.c (revision 17afab1de42236ee2f6235f4383cc6f3f13f8a10)
1 /*
2  * kexec.c - kexec system call
3  * Copyright (C) 2002-2004 Eric Biederman  <ebiederm@xmission.com>
4  *
5  * This source code is licensed under the GNU General Public License,
6  * Version 2.  See the file COPYING for more details.
7  */
8 
9 #include <linux/capability.h>
10 #include <linux/mm.h>
11 #include <linux/file.h>
12 #include <linux/slab.h>
13 #include <linux/fs.h>
14 #include <linux/kexec.h>
15 #include <linux/mutex.h>
16 #include <linux/list.h>
17 #include <linux/highmem.h>
18 #include <linux/syscalls.h>
19 #include <linux/reboot.h>
20 #include <linux/ioport.h>
21 #include <linux/hardirq.h>
22 #include <linux/elf.h>
23 #include <linux/elfcore.h>
24 #include <linux/utsname.h>
25 #include <linux/numa.h>
26 #include <linux/suspend.h>
27 #include <linux/device.h>
28 #include <linux/freezer.h>
29 #include <linux/pm.h>
30 #include <linux/cpu.h>
31 #include <linux/console.h>
32 #include <linux/vmalloc.h>
33 #include <linux/swap.h>
34 #include <linux/syscore_ops.h>
35 
36 #include <asm/page.h>
37 #include <asm/uaccess.h>
38 #include <asm/io.h>
39 #include <asm/sections.h>
40 
41 /* Per cpu memory for storing cpu states in case of system crash. */
42 note_buf_t __percpu *crash_notes;
43 
44 /* vmcoreinfo stuff */
45 static unsigned char vmcoreinfo_data[VMCOREINFO_BYTES];
46 u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
47 size_t vmcoreinfo_size;
48 size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data);
49 
50 /* Location of the reserved area for the crash kernel */
51 struct resource crashk_res = {
52 	.name  = "Crash kernel",
53 	.start = 0,
54 	.end   = 0,
55 	.flags = IORESOURCE_BUSY | IORESOURCE_MEM
56 };
57 struct resource crashk_low_res = {
58 	.name  = "Crash kernel",
59 	.start = 0,
60 	.end   = 0,
61 	.flags = IORESOURCE_BUSY | IORESOURCE_MEM
62 };
63 
64 int kexec_should_crash(struct task_struct *p)
65 {
66 	if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops)
67 		return 1;
68 	return 0;
69 }
70 
71 /*
72  * When kexec transitions to the new kernel there is a one-to-one
73  * mapping between physical and virtual addresses.  On processors
74  * where you can disable the MMU this is trivial.  For
75  * others it is still a simple, predictable page table to set up.
76  *
77  * In that environment kexec copies the new kernel to its final
78  * resting place.  This means I can only support memory whose
79  * physical address can fit in an unsigned long.  In particular
80  * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled.
81  * If the assembly stub has more restrictive requirements
82  * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be
83  * defined more restrictively in <asm/kexec.h>.
84  *
85  * The code for the transition from the current kernel to
86  * the new kernel is placed in the control_code_buffer, whose size
87  * is given by KEXEC_CONTROL_PAGE_SIZE.  In the best case only a single
88  * page of memory is necessary, but some architectures require more.
89  * Because this memory must be identity mapped in the transition from
90  * virtual to physical addresses it must live in the range
91  * 0 - TASK_SIZE, as only the user space mappings are arbitrarily
92  * modifiable.
93  *
94  * The assembly stub in the control code buffer is passed a linked list
95  * of descriptor pages detailing the source pages of the new kernel,
96  * and the destination addresses of those source pages.  As this data
97  * structure is not used in the context of the current OS, it must
98  * be self-contained.
99  *
100  * The code has been made to work with highmem pages and will use a
101  * destination page in its final resting place (if it happens
102  * to allocate it).  The end product of this is that most of the
103  * physical address space, and most of RAM can be used.
104  *
105  * Future directions include:
106  *  - allocating a page table with the control code buffer identity
107  *    mapped, to simplify machine_kexec and make kexec_on_panic more
108  *    reliable.
109  */
110 
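/*
 * A minimal sketch of the consumer side, assuming only the IND_*
 * flags and helpers used elsewhere in this file; it is not any
 * particular architecture's relocation stub.  The real stub performs
 * these copies with the MMU off (or identity mapped), which is why
 * the list stores physical addresses.
 */
#if 0
static void relocate_sketch(kimage_entry_t *ptr)
{
	unsigned long dest = 0;
	kimage_entry_t entry;

	for (entry = *ptr; !(entry & IND_DONE); entry = *++ptr) {
		if (entry & IND_DESTINATION) {
			/* Remember where the next source pages go */
			dest = entry & PAGE_MASK;
		} else if (entry & IND_INDIRECTION) {
			/* Chain to the next page of entries; the -1
			 * compensates for the loop's ++ptr.
			 */
			ptr = (kimage_entry_t *)
				phys_to_virt(entry & PAGE_MASK) - 1;
		} else if (entry & IND_SOURCE) {
			/* Copy one source page into place */
			copy_page(phys_to_virt(dest),
				  phys_to_virt(entry & PAGE_MASK));
			dest += PAGE_SIZE;
		}
	}
}
#endif
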
111 /*
112  * KIMAGE_NO_DEST is an impossible destination address, used when
113  * allocating pages whose destination address we do not care about.
114  */
115 #define KIMAGE_NO_DEST (-1UL)
116 
117 static int kimage_is_destination_range(struct kimage *image,
118 				       unsigned long start, unsigned long end);
119 static struct page *kimage_alloc_page(struct kimage *image,
120 				       gfp_t gfp_mask,
121 				       unsigned long dest);
122 
123 static int do_kimage_alloc(struct kimage **rimage, unsigned long entry,
124 			   unsigned long nr_segments,
125 			   struct kexec_segment __user *segments)
126 {
127 	size_t segment_bytes;
128 	struct kimage *image;
129 	unsigned long i;
130 	int result;
131 
132 	/* Allocate a controlling structure */
133 	result = -ENOMEM;
134 	image = kzalloc(sizeof(*image), GFP_KERNEL);
135 	if (!image)
136 		goto out;
137 
138 	image->head = 0;
139 	image->entry = &image->head;
140 	image->last_entry = &image->head;
141 	image->control_page = ~0; /* By default this does not apply */
142 	image->start = entry;
143 	image->type = KEXEC_TYPE_DEFAULT;
144 
145 	/* Initialize the list of control pages */
146 	INIT_LIST_HEAD(&image->control_pages);
147 
148 	/* Initialize the list of destination pages */
149 	INIT_LIST_HEAD(&image->dest_pages);
150 
151 	/* Initialize the list of unusable pages */
152 	INIT_LIST_HEAD(&image->unuseable_pages);
153 
154 	/* Read in the segments */
155 	image->nr_segments = nr_segments;
156 	segment_bytes = nr_segments * sizeof(*segments);
157 	result = copy_from_user(image->segment, segments, segment_bytes);
158 	if (result) {
159 		result = -EFAULT;
160 		goto out;
161 	}
162 
163 	/*
164 	 * Verify we have good destination addresses.  The caller is
165 	 * responsible for making certain we don't attempt to load
166 	 * the new image into invalid or reserved areas of RAM.  This
167 	 * just verifies it is an address we can use.
168 	 *
169 	 * Since the kernel does everything in page size chunks, ensure
170 	 * the destination addresses are page aligned.  Too many
171 	 * special cases crop up when we don't do this.  The most
172 	 * insidious is getting overlapping destination addresses
173 	 * simply because addresses are changed to page size
174 	 * granularity.
175 	 */
176 	result = -EADDRNOTAVAIL;
177 	for (i = 0; i < nr_segments; i++) {
178 		unsigned long mstart, mend;
179 
180 		mstart = image->segment[i].mem;
181 		mend   = mstart + image->segment[i].memsz;
182 		if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
183 			goto out;
184 		if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
185 			goto out;
186 	}
187 
188 	/* Verify our destination addresses do not overlap.
189 	 * If we allowed overlapping destination addresses
190 	 * through, very weird things could happen, with no
191 	 * easy explanation, as one segment stomps on another.
192 	 */
193 	result = -EINVAL;
194 	for (i = 0; i < nr_segments; i++) {
195 		unsigned long mstart, mend;
196 		unsigned long j;
197 
198 		mstart = image->segment[i].mem;
199 		mend   = mstart + image->segment[i].memsz;
200 		for (j = 0; j < i; j++) {
201 			unsigned long pstart, pend;
202 			pstart = image->segment[j].mem;
203 			pend   = pstart + image->segment[j].memsz;
204 			/* Do the segments overlap ? */
205 			if ((mend > pstart) && (mstart < pend))
206 				goto out;
207 		}
208 	}
209 
210 	/* Ensure our buffer sizes are strictly less than
211 	 * our memory sizes.  This should always be the case,
212 	 * and it is easier to check up front than to be surprised
213 	 * later on.
214 	 */
215 	result = -EINVAL;
216 	for (i = 0; i < nr_segments; i++) {
217 		if (image->segment[i].bufsz > image->segment[i].memsz)
218 			goto out;
219 	}
220 
221 	result = 0;
222 out:
223 	if (result == 0)
224 		*rimage = image;
225 	else
226 		kfree(image);
227 
228 	return result;
229 
230 }
231 
232 static void kimage_free_page_list(struct list_head *list);
233 
234 static int kimage_normal_alloc(struct kimage **rimage, unsigned long entry,
235 				unsigned long nr_segments,
236 				struct kexec_segment __user *segments)
237 {
238 	int result;
239 	struct kimage *image;
240 
241 	/* Allocate and initialize a controlling structure */
242 	image = NULL;
243 	result = do_kimage_alloc(&image, entry, nr_segments, segments);
244 	if (result)
245 		goto out;
246 
247 	/*
248 	 * Find a location for the control code buffer, and add it to
249 	 * the vector of segments so that its pages will also be
250 	 * counted as destination pages.
251 	 */
252 	result = -ENOMEM;
253 	image->control_code_page = kimage_alloc_control_pages(image,
254 					   get_order(KEXEC_CONTROL_PAGE_SIZE));
255 	if (!image->control_code_page) {
256 		printk(KERN_ERR "Could not allocate control_code_buffer\n");
257 		goto out_free;
258 	}
259 
260 	image->swap_page = kimage_alloc_control_pages(image, 0);
261 	if (!image->swap_page) {
262 		printk(KERN_ERR "Could not allocate swap buffer\n");
263 		goto out_free;
264 	}
265 
266 	*rimage = image;
267 	return 0;
268 
269 out_free:
270 	kimage_free_page_list(&image->control_pages);
271 	kfree(image);
272 out:
273 	return result;
274 }
275 
276 static int kimage_crash_alloc(struct kimage **rimage, unsigned long entry,
277 				unsigned long nr_segments,
278 				struct kexec_segment __user *segments)
279 {
280 	int result;
281 	struct kimage *image;
282 	unsigned long i;
283 
284 	image = NULL;
285 	/* Verify we have a valid entry point */
286 	if ((entry < crashk_res.start) || (entry > crashk_res.end)) {
287 		result = -EADDRNOTAVAIL;
288 		goto out;
289 	}
290 
291 	/* Allocate and initialize a controlling structure */
292 	result = do_kimage_alloc(&image, entry, nr_segments, segments);
293 	if (result)
294 		goto out;
295 
296 	/* Enable the special crash kernel control page
297 	 * allocation policy.
298 	 */
299 	image->control_page = crashk_res.start;
300 	image->type = KEXEC_TYPE_CRASH;
301 
302 	/*
303 	 * Verify we have good destination addresses.  Normally
304 	 * the caller is responsible for making certain we don't
305 	 * attempt to load the new image into invalid or reserved
306 	 * areas of RAM.  But crash kernels are preloaded into a
307 	 * reserved area of ram.  We must ensure the addresses
308 	 * reserved area of RAM.  We must ensure the addresses
309 	 * are in the reserved area, otherwise preloading the
310 	 */
311 	result = -EADDRNOTAVAIL;
312 	for (i = 0; i < nr_segments; i++) {
313 		unsigned long mstart, mend;
314 
315 		mstart = image->segment[i].mem;
316 		mend = mstart + image->segment[i].memsz - 1;
317 		/* Ensure we are within the crash kernel limits */
318 		if ((mstart < crashk_res.start) || (mend > crashk_res.end))
319 			goto out_free;
320 	}
321 
322 	/*
323 	 * Find a location for the control code buffer, and add it to
324 	 * the vector of segments so that its pages will also be
325 	 * counted as destination pages.
326 	 */
327 	result = -ENOMEM;
328 	image->control_code_page = kimage_alloc_control_pages(image,
329 					   get_order(KEXEC_CONTROL_PAGE_SIZE));
330 	if (!image->control_code_page) {
331 		printk(KERN_ERR "Could not allocate control_code_buffer\n");
332 		goto out_free;
333 	}
334 
335 	*rimage = image;
336 	return 0;
337 
338 out_free:
339 	kfree(image);
340 out:
341 	return result;
342 }
343 
344 static int kimage_is_destination_range(struct kimage *image,
345 					unsigned long start,
346 					unsigned long end)
347 {
348 	unsigned long i;
349 
350 	for (i = 0; i < image->nr_segments; i++) {
351 		unsigned long mstart, mend;
352 
353 		mstart = image->segment[i].mem;
354 		mend = mstart + image->segment[i].memsz;
355 		if ((end > mstart) && (start < mend))
356 			return 1;
357 	}
358 
359 	return 0;
360 }
361 
362 static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
363 {
364 	struct page *pages;
365 
366 	pages = alloc_pages(gfp_mask, order);
367 	if (pages) {
368 		unsigned int count, i;
369 		pages->mapping = NULL;
370 		set_page_private(pages, order);
371 		count = 1 << order;
372 		for (i = 0; i < count; i++)
373 			SetPageReserved(pages + i);
374 	}
375 
376 	return pages;
377 }
378 
379 static void kimage_free_pages(struct page *page)
380 {
381 	unsigned int order, count, i;
382 
383 	order = page_private(page);
384 	count = 1 << order;
385 	for (i = 0; i < count; i++)
386 		ClearPageReserved(page + i);
387 	__free_pages(page, order);
388 }
389 
390 static void kimage_free_page_list(struct list_head *list)
391 {
392 	struct list_head *pos, *next;
393 
394 	list_for_each_safe(pos, next, list) {
395 		struct page *page;
396 
397 		page = list_entry(pos, struct page, lru);
398 		list_del(&page->lru);
399 		kimage_free_pages(page);
400 	}
401 }
402 
403 static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
404 							unsigned int order)
405 {
406 	/* Control pages are special: they are the intermediaries
407 	 * that are needed while we copy the rest of the pages
408 	 * to their final resting place.  As such they must
409 	 * not conflict with either the destination addresses
410 	 * or memory the kernel is already using.
411 	 *
412 	 * The only case where we really need more than one of
413 	 * these is for architectures where we cannot disable
414 	 * the MMU and must instead generate an identity mapped
415 	 * page table for all of the memory.
416 	 *
417 	 * At worst this runs in O(N) of the image size.
418 	 */
419 	struct list_head extra_pages;
420 	struct page *pages;
421 	unsigned int count;
422 
423 	count = 1 << order;
424 	INIT_LIST_HEAD(&extra_pages);
425 
426 	/* Loop while I can allocate a page and the page allocated
427 	 * is a destination page.
428 	 */
429 	do {
430 		unsigned long pfn, epfn, addr, eaddr;
431 
432 		pages = kimage_alloc_pages(GFP_KERNEL, order);
433 		if (!pages)
434 			break;
435 		pfn   = page_to_pfn(pages);
436 		epfn  = pfn + count;
437 		addr  = pfn << PAGE_SHIFT;
438 		eaddr = epfn << PAGE_SHIFT;
439 		if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
440 			      kimage_is_destination_range(image, addr, eaddr)) {
441 			list_add(&pages->lru, &extra_pages);
442 			pages = NULL;
443 		}
444 	} while (!pages);
445 
446 	if (pages) {
447 		/* Remember the allocated page... */
448 		list_add(&pages->lru, &image->control_pages);
449 
450 		/* Because the page is already in its destination
451 		 * location we will never allocate another page at
452 		 * that address.  Therefore kimage_alloc_pages
453 		 * will not return it (again) and we don't need
454 		 * to give it an entry in image->segment[].
455 		 */
456 	}
457 	/* Deal with the destination pages I have inadvertently allocated.
458 	 *
459 	 * Ideally I would convert multi-page allocations into single
460 	 * page allocations, and add everything to image->dest_pages.
461 	 *
462 	 * For now it is simpler to just free the pages.
463 	 */
464 	kimage_free_page_list(&extra_pages);
465 
466 	return pages;
467 }
468 
469 static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
470 						      unsigned int order)
471 {
472 	/* Control pages are special: they are the intermediaries
473 	 * that are needed while we copy the rest of the pages
474 	 * to their final resting place.  As such they must
475 	 * not conflict with either the destination addresses
476 	 * or memory the kernel is already using.
477 	 *
478 	 * Control pages are also the only pages we must allocate
479 	 * when loading a crash kernel.  All of the other pages
480 	 * are specified by the segments and we just memcpy
481 	 * into them directly.
482 	 *
483 	 * The only case where we really need more than one of
484 	 * these is for architectures where we cannot disable
485 	 * the MMU and must instead generate an identity mapped
486 	 * page table for all of the memory.
487 	 *
488 	 * Given the low demand this implements a very simple
489 	 * allocator that finds the first hole of the appropriate
490 	 * size in the reserved memory region, and allocates all
491 	 * of the memory up to and including the hole.
492 	 */
493 	unsigned long hole_start, hole_end, size;
494 	struct page *pages;
495 
496 	pages = NULL;
497 	size = (1 << order) << PAGE_SHIFT;
498 	hole_start = (image->control_page + (size - 1)) & ~(size - 1);
499 	hole_end   = hole_start + size - 1;
500 	while (hole_end <= crashk_res.end) {
501 		unsigned long i;
502 
503 		if (hole_end > KEXEC_CRASH_CONTROL_MEMORY_LIMIT)
504 			break;
505 		/* See if I overlap any of the segments */
506 		for (i = 0; i < image->nr_segments; i++) {
507 			unsigned long mstart, mend;
508 
509 			mstart = image->segment[i].mem;
510 			mend   = mstart + image->segment[i].memsz - 1;
511 			if ((hole_end >= mstart) && (hole_start <= mend)) {
512 				/* Advance the hole to the end of the segment */
513 				hole_start = (mend + (size - 1)) & ~(size - 1);
514 				hole_end   = hole_start + size - 1;
515 				break;
516 			}
517 		}
518 		/* If I don't overlap any segments I have found my hole! */
519 		if (i == image->nr_segments) {
520 			pages = pfn_to_page(hole_start >> PAGE_SHIFT);
521 			break;
522 		}
523 	}
524 	if (pages)
525 		image->control_page = hole_end;
526 
527 	return pages;
528 }
529 
530 
531 struct page *kimage_alloc_control_pages(struct kimage *image,
532 					 unsigned int order)
533 {
534 	struct page *pages = NULL;
535 
536 	switch (image->type) {
537 	case KEXEC_TYPE_DEFAULT:
538 		pages = kimage_alloc_normal_control_pages(image, order);
539 		break;
540 	case KEXEC_TYPE_CRASH:
541 		pages = kimage_alloc_crash_control_pages(image, order);
542 		break;
543 	}
544 
545 	return pages;
546 }
547 
548 static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
549 {
550 	if (*image->entry != 0)
551 		image->entry++;
552 
553 	if (image->entry == image->last_entry) {
554 		kimage_entry_t *ind_page;
555 		struct page *page;
556 
557 		page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST);
558 		if (!page)
559 			return -ENOMEM;
560 
561 		ind_page = page_address(page);
562 		*image->entry = virt_to_phys(ind_page) | IND_INDIRECTION;
563 		image->entry = ind_page;
564 		image->last_entry = ind_page +
565 				      ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
566 	}
567 	*image->entry = entry;
568 	image->entry++;
569 	*image->entry = 0;
570 
571 	return 0;
572 }
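
/*
 * Shape of the resulting list, with made-up addresses: each bracketed
 * block below is one page of kimage_entry_t slots, and the last slot
 * of a full page chains to the next page, so the relocation stub can
 * walk the whole thing without help from the old kernel:
 *
 *	head: [ D0|IND_DESTINATION, S0|IND_SOURCE, S1|IND_SOURCE, ...,
 *	        next|IND_INDIRECTION ] --> [ ..., IND_DONE ]
 */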
573 
574 static int kimage_set_destination(struct kimage *image,
575 				   unsigned long destination)
576 {
577 	int result;
578 
579 	destination &= PAGE_MASK;
580 	result = kimage_add_entry(image, destination | IND_DESTINATION);
581 	if (result == 0)
582 		image->destination = destination;
583 
584 	return result;
585 }
586 
587 
588 static int kimage_add_page(struct kimage *image, unsigned long page)
589 {
590 	int result;
591 
592 	page &= PAGE_MASK;
593 	result = kimage_add_entry(image, page | IND_SOURCE);
594 	if (result == 0)
595 		image->destination += PAGE_SIZE;
596 
597 	return result;
598 }
599 
600 
601 static void kimage_free_extra_pages(struct kimage *image)
602 {
603 	/* Walk through and free any extra destination pages I may have */
604 	kimage_free_page_list(&image->dest_pages);
605 
606 	/* Walk through and free any unusable pages I have cached */
607 	kimage_free_page_list(&image->unuseable_pages);
608 
609 }
610 static void kimage_terminate(struct kimage *image)
611 {
612 	if (*image->entry != 0)
613 		image->entry++;
614 
615 	*image->entry = IND_DONE;
616 }
617 
618 #define for_each_kimage_entry(image, ptr, entry) \
619 	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
620 		ptr = (entry & IND_INDIRECTION) ? \
621 			phys_to_virt(entry & PAGE_MASK) : ptr + 1)
622 
623 static void kimage_free_entry(kimage_entry_t entry)
624 {
625 	struct page *page;
626 
627 	page = pfn_to_page(entry >> PAGE_SHIFT);
628 	kimage_free_pages(page);
629 }
630 
631 static void kimage_free(struct kimage *image)
632 {
633 	kimage_entry_t *ptr, entry;
634 	kimage_entry_t ind = 0;
635 
636 	if (!image)
637 		return;
638 
639 	kimage_free_extra_pages(image);
640 	for_each_kimage_entry(image, ptr, entry) {
641 		if (entry & IND_INDIRECTION) {
642 			/* Free the previous indirection page */
643 			if (ind & IND_INDIRECTION)
644 				kimage_free_entry(ind);
645 			/* Save this indirection page until we are
646 			 * done with it.
647 			 */
648 			ind = entry;
649 		}
650 		else if (entry & IND_SOURCE)
651 			kimage_free_entry(entry);
652 	}
653 	/* Free the final indirection page */
654 	if (ind & IND_INDIRECTION)
655 		kimage_free_entry(ind);
656 
657 	/* Handle any machine specific cleanup */
658 	machine_kexec_cleanup(image);
659 
660 	/* Free the kexec control pages... */
661 	kimage_free_page_list(&image->control_pages);
662 	kfree(image);
663 }
664 
665 static kimage_entry_t *kimage_dst_used(struct kimage *image,
666 					unsigned long page)
667 {
668 	kimage_entry_t *ptr, entry;
669 	unsigned long destination = 0;
670 
671 	for_each_kimage_entry(image, ptr, entry) {
672 		if (entry & IND_DESTINATION)
673 			destination = entry & PAGE_MASK;
674 		else if (entry & IND_SOURCE) {
675 			if (page == destination)
676 				return ptr;
677 			destination += PAGE_SIZE;
678 		}
679 	}
680 
681 	return NULL;
682 }
683 
684 static struct page *kimage_alloc_page(struct kimage *image,
685 					gfp_t gfp_mask,
686 					unsigned long destination)
687 {
688 	/*
689 	 * Here we implement safeguards to ensure that a source page
690 	 * is not copied to its destination page before the data on
691 	 * the destination page is no longer useful.
692 	 *
693 	 * To do this we maintain the invariant that a source page is
694 	 * either its own destination page, or it is not a
695 	 * destination page at all.
696 	 *
697 	 * That is slightly stronger than required, but the proof
698 	 * that no problems will occur is trivial, and the
699 	 * implementation is simple to verify.
700 	 *
701 	 * When allocating all pages normally this algorithm will run
702 	 * in O(N) time, but in the worst case it will run in O(N^2)
703 	 * time.   If the runtime is a problem the data structures can
704 	 * be fixed.
705 	 */
706 	struct page *page;
707 	unsigned long addr;
708 
709 	/*
710 	 * Walk through the list of destination pages, and see if I
711 	 * have a match.
712 	 */
713 	list_for_each_entry(page, &image->dest_pages, lru) {
714 		addr = page_to_pfn(page) << PAGE_SHIFT;
715 		if (addr == destination) {
716 			list_del(&page->lru);
717 			return page;
718 		}
719 	}
720 	page = NULL;
721 	while (1) {
722 		kimage_entry_t *old;
723 
724 		/* Allocate a page, if we run out of memory give up */
725 		page = kimage_alloc_pages(gfp_mask, 0);
726 		if (!page)
727 			return NULL;
728 		/* If the page cannot be used file it away */
729 		if (page_to_pfn(page) >
730 				(KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
731 			list_add(&page->lru, &image->unuseable_pages);
732 			continue;
733 		}
734 		addr = page_to_pfn(page) << PAGE_SHIFT;
735 
736 		/* If it is the destination page we want, use it */
737 		if (addr == destination)
738 			break;
739 
740 		/* If the page is not a destination page use it */
741 		if (!kimage_is_destination_range(image, addr,
742 						  addr + PAGE_SIZE))
743 			break;
744 
745 		/*
746 		 * I know that the page is someone's destination page.
747 		 * See if there is already a source page for this
748 		 * destination page, and if so, swap the source pages.
749 		 */
750 		old = kimage_dst_used(image, addr);
751 		if (old) {
752 			/* If so move it */
753 			unsigned long old_addr;
754 			struct page *old_page;
755 
756 			old_addr = *old & PAGE_MASK;
757 			old_page = pfn_to_page(old_addr >> PAGE_SHIFT);
758 			copy_highpage(page, old_page);
759 			*old = addr | (*old & ~PAGE_MASK);
760 
761 			/* The old page I have found cannot be a
762 			 * destination page, so return it if its
763 			 * gfp_flags honor the ones passed in.
764 			 */
765 			if (!(gfp_mask & __GFP_HIGHMEM) &&
766 			    PageHighMem(old_page)) {
767 				kimage_free_pages(old_page);
768 				continue;
769 			}
770 			addr = old_addr;
771 			page = old_page;
772 			break;
773 		}
774 		else {
775 			/* Place the page on the destination list; I
776 			 * will use it later.
777 			 */
778 			list_add(&page->lru, &image->dest_pages);
779 		}
780 	}
781 
782 	return page;
783 }
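
/*
 * A worked example of the swap case above, with made-up addresses: we
 * want a source page for destination D, but the allocator hands back
 * the page at address A, and A is itself the destination of an
 * existing source page S.  We copy S's contents into the new page at
 * A, repoint S's IND_SOURCE entry at A, and hand back S's old page,
 * which by the invariant is now free to serve as the source for D.
 */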
784 
785 static int kimage_load_normal_segment(struct kimage *image,
786 					 struct kexec_segment *segment)
787 {
788 	unsigned long maddr;
789 	unsigned long ubytes, mbytes;
790 	int result;
791 	unsigned char __user *buf;
792 
793 	result = 0;
794 	buf = segment->buf;
795 	ubytes = segment->bufsz;
796 	mbytes = segment->memsz;
797 	maddr = segment->mem;
798 
799 	result = kimage_set_destination(image, maddr);
800 	if (result < 0)
801 		goto out;
802 
803 	while (mbytes) {
804 		struct page *page;
805 		char *ptr;
806 		size_t uchunk, mchunk;
807 
808 		page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
809 		if (!page) {
810 			result  = -ENOMEM;
811 			goto out;
812 		}
813 		result = kimage_add_page(image, page_to_pfn(page)
814 								<< PAGE_SHIFT);
815 		if (result < 0)
816 			goto out;
817 
818 		ptr = kmap(page);
819 		/* Start with a clear page */
820 		clear_page(ptr);
821 		ptr += maddr & ~PAGE_MASK;
822 		mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK);
823 		if (mchunk > mbytes)
824 			mchunk = mbytes;
825 
826 		uchunk = mchunk;
827 		if (uchunk > ubytes)
828 			uchunk = ubytes;
829 
830 		result = copy_from_user(ptr, buf, uchunk);
831 		kunmap(page);
832 		if (result) {
833 			result = -EFAULT;
834 			goto out;
835 		}
836 		ubytes -= uchunk;
837 		maddr  += mchunk;
838 		buf    += mchunk;
839 		mbytes -= mchunk;
840 	}
841 out:
842 	return result;
843 }
844 
845 static int kimage_load_crash_segment(struct kimage *image,
846 					struct kexec_segment *segment)
847 {
848 	/* For crash dump kernels we simply copy the data from
849 	 * user space to its destination.
850 	 * We do things a page at a time for the sake of kmap.
851 	 */
852 	unsigned long maddr;
853 	unsigned long ubytes, mbytes;
854 	int result;
855 	unsigned char __user *buf;
856 
857 	result = 0;
858 	buf = segment->buf;
859 	ubytes = segment->bufsz;
860 	mbytes = segment->memsz;
861 	maddr = segment->mem;
862 	while (mbytes) {
863 		struct page *page;
864 		char *ptr;
865 		size_t uchunk, mchunk;
866 
867 		page = pfn_to_page(maddr >> PAGE_SHIFT);
868 		if (!page) {
869 			result  = -ENOMEM;
870 			goto out;
871 		}
872 		ptr = kmap(page);
873 		ptr += maddr & ~PAGE_MASK;
874 		mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK);
875 		if (mchunk > mbytes)
876 			mchunk = mbytes;
877 
878 		uchunk = mchunk;
879 		if (uchunk > ubytes) {
880 			uchunk = ubytes;
881 			/* Zero the trailing part of the page */
882 			memset(ptr + uchunk, 0, mchunk - uchunk);
883 		}
884 		result = copy_from_user(ptr, buf, uchunk);
885 		kexec_flush_icache_page(page);
886 		kunmap(page);
887 		if (result) {
888 			result = -EFAULT;
889 			goto out;
890 		}
891 		ubytes -= uchunk;
892 		maddr  += mchunk;
893 		buf    += mchunk;
894 		mbytes -= mchunk;
895 	}
896 out:
897 	return result;
898 }
899 
900 static int kimage_load_segment(struct kimage *image,
901 				struct kexec_segment *segment)
902 {
903 	int result = -ENOMEM;
904 
905 	switch (image->type) {
906 	case KEXEC_TYPE_DEFAULT:
907 		result = kimage_load_normal_segment(image, segment);
908 		break;
909 	case KEXEC_TYPE_CRASH:
910 		result = kimage_load_crash_segment(image, segment);
911 		break;
912 	}
913 
914 	return result;
915 }
916 
917 /*
918  * Exec Kernel system call: for obvious reasons only root may call it.
919  *
920  * This call breaks up into three pieces.
921  * - A generic part which loads the new kernel from the current
922  *   address space, and very carefully places the data in the
923  *   allocated pages.
924  *
925  * - A generic part that interacts with the kernel and tells all of
926  *   the devices to shut down, preventing on-going DMAs, and placing
927  *   the devices in a consistent state so a later kernel can
928  *   reinitialize them.
929  *
930  * - A machine specific part that includes the syscall number
931  *   and then copies the image to its final destination and
932  *   jumps into the image at entry.
933  *
934  * kexec does not sync or unmount filesystems, so if you need
935  * that to happen you must do it yourself.
936  */
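
/*
 * A minimal user-space sketch of the call, assuming a single segment
 * whose start doubles as the entry point; the buffer, sizes and
 * destination are placeholders.  A real loader such as kexec-tools
 * builds the segment list from the kernel image being loaded.
 */
#if 0
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/kexec.h>

static int load_one_segment(void *buf, size_t bufsz,
			    unsigned long dest, size_t memsz)
{
	struct kexec_segment seg = {
		.buf   = buf,		/* user-space source buffer */
		.bufsz = bufsz,		/* bytes to copy from buf */
		.mem   = (void *)dest,	/* page-aligned destination */
		.memsz = memsz,		/* page-aligned, >= bufsz */
	};

	return syscall(SYS_kexec_load, dest, 1UL, &seg,
		       (unsigned long)KEXEC_ARCH_DEFAULT);
}
#endif
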
937 struct kimage *kexec_image;
938 struct kimage *kexec_crash_image;
939 
940 static DEFINE_MUTEX(kexec_mutex);
941 
942 SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
943 		struct kexec_segment __user *, segments, unsigned long, flags)
944 {
945 	struct kimage **dest_image, *image;
946 	int result;
947 
948 	/* We only trust the superuser with rebooting the system. */
949 	if (!capable(CAP_SYS_BOOT))
950 		return -EPERM;
951 
952 	/*
953 	 * Verify we have a legal set of flags
954 	 * This leaves us room for future extensions.
955 	 */
956 	if ((flags & KEXEC_FLAGS) != (flags & ~KEXEC_ARCH_MASK))
957 		return -EINVAL;
958 
959 	/* Verify we are on the appropriate architecture */
960 	if (((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH) &&
961 		((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
962 		return -EINVAL;
963 
964 	/* Put an artificial cap on the number
965 	 * of segments passed to kexec_load.
966 	 */
967 	if (nr_segments > KEXEC_SEGMENT_MAX)
968 		return -EINVAL;
969 
970 	image = NULL;
971 	result = 0;
972 
973 	/* Because we write directly to the reserved memory
974 	 * region when loading crash kernels, we need a mutex here to
975 	 * prevent multiple crash kernels from attempting to load
976 	 * simultaneously, and to prevent a crash kernel from loading
977 	 * over the top of an in-use crash kernel.
978 	 *
979 	 * KISS: always take the mutex.
980 	 */
981 	if (!mutex_trylock(&kexec_mutex))
982 		return -EBUSY;
983 
984 	dest_image = &kexec_image;
985 	if (flags & KEXEC_ON_CRASH)
986 		dest_image = &kexec_crash_image;
987 	if (nr_segments > 0) {
988 		unsigned long i;
989 
990 		/* Loading another kernel to reboot into */
991 		if ((flags & KEXEC_ON_CRASH) == 0)
992 			result = kimage_normal_alloc(&image, entry,
993 							nr_segments, segments);
994 		/* Loading another kernel to switch to if this one crashes */
995 		else if (flags & KEXEC_ON_CRASH) {
996 			/* Free any current crash dump kernel before
997 			 * we corrupt it.
998 			 */
999 			kimage_free(xchg(&kexec_crash_image, NULL));
1000 			result = kimage_crash_alloc(&image, entry,
1001 						     nr_segments, segments);
1002 			crash_map_reserved_pages();
1003 		}
1004 		if (result)
1005 			goto out;
1006 
1007 		if (flags & KEXEC_PRESERVE_CONTEXT)
1008 			image->preserve_context = 1;
1009 		result = machine_kexec_prepare(image);
1010 		if (result)
1011 			goto out;
1012 
1013 		for (i = 0; i < nr_segments; i++) {
1014 			result = kimage_load_segment(image, &image->segment[i]);
1015 			if (result)
1016 				goto out;
1017 		}
1018 		kimage_terminate(image);
1019 		if (flags & KEXEC_ON_CRASH)
1020 			crash_unmap_reserved_pages();
1021 	}
1022 	/* Install the new kernel and uninstall the old */
1023 	image = xchg(dest_image, image);
1024 
1025 out:
1026 	mutex_unlock(&kexec_mutex);
1027 	kimage_free(image);
1028 
1029 	return result;
1030 }
1031 
1032 /*
1033  * Add and remove page tables for crashkernel memory
1034  *
1035  * Provide an empty default implementation here -- architecture
1036  * code may override this
1037  */
1038 void __weak crash_map_reserved_pages(void)
1039 {}
1040 
1041 void __weak crash_unmap_reserved_pages(void)
1042 {}
1043 
1044 #ifdef CONFIG_COMPAT
1045 asmlinkage long compat_sys_kexec_load(unsigned long entry,
1046 				unsigned long nr_segments,
1047 				struct compat_kexec_segment __user *segments,
1048 				unsigned long flags)
1049 {
1050 	struct compat_kexec_segment in;
1051 	struct kexec_segment out, __user *ksegments;
1052 	unsigned long i, result;
1053 
1054 	/* Don't allow clients that don't understand the native
1055 	 * architecture to do anything.
1056 	 */
1057 	if ((flags & KEXEC_ARCH_MASK) == KEXEC_ARCH_DEFAULT)
1058 		return -EINVAL;
1059 
1060 	if (nr_segments > KEXEC_SEGMENT_MAX)
1061 		return -EINVAL;
1062 
1063 	ksegments = compat_alloc_user_space(nr_segments * sizeof(out));
1064 	for (i = 0; i < nr_segments; i++) {
1065 		result = copy_from_user(&in, &segments[i], sizeof(in));
1066 		if (result)
1067 			return -EFAULT;
1068 
1069 		out.buf   = compat_ptr(in.buf);
1070 		out.bufsz = in.bufsz;
1071 		out.mem   = in.mem;
1072 		out.memsz = in.memsz;
1073 
1074 		result = copy_to_user(&ksegments[i], &out, sizeof(out));
1075 		if (result)
1076 			return -EFAULT;
1077 	}
1078 
1079 	return sys_kexec_load(entry, nr_segments, ksegments, flags);
1080 }
1081 #endif
1082 
1083 void crash_kexec(struct pt_regs *regs)
1084 {
1085 	/* Take the kexec_mutex here to prevent sys_kexec_load
1086 	 * running on one cpu from replacing the crash kernel
1087 	 * we are using after a panic on a different cpu.
1088 	 *
1089 	 * If the crash kernel was not located in a fixed area
1090 	 * of memory, the xchg(&kexec_crash_image) would be
1091 	 * sufficient.  But since I reuse the memory...
1092 	 */
1093 	if (mutex_trylock(&kexec_mutex)) {
1094 		if (kexec_crash_image) {
1095 			struct pt_regs fixed_regs;
1096 
1097 			crash_setup_regs(&fixed_regs, regs);
1098 			crash_save_vmcoreinfo();
1099 			machine_crash_shutdown(&fixed_regs);
1100 			machine_kexec(kexec_crash_image);
1101 		}
1102 		mutex_unlock(&kexec_mutex);
1103 	}
1104 }
1105 
1106 size_t crash_get_memory_size(void)
1107 {
1108 	size_t size = 0;
1109 	mutex_lock(&kexec_mutex);
1110 	if (crashk_res.end != crashk_res.start)
1111 		size = resource_size(&crashk_res);
1112 	mutex_unlock(&kexec_mutex);
1113 	return size;
1114 }
1115 
1116 void __weak crash_free_reserved_phys_range(unsigned long begin,
1117 					   unsigned long end)
1118 {
1119 	unsigned long addr;
1120 
1121 	for (addr = begin; addr < end; addr += PAGE_SIZE)
1122 		free_reserved_page(pfn_to_page(addr >> PAGE_SHIFT));
1123 }
1124 
1125 int crash_shrink_memory(unsigned long new_size)
1126 {
1127 	int ret = 0;
1128 	unsigned long start, end;
1129 	unsigned long old_size;
1130 	struct resource *ram_res;
1131 
1132 	mutex_lock(&kexec_mutex);
1133 
1134 	if (kexec_crash_image) {
1135 		ret = -ENOENT;
1136 		goto unlock;
1137 	}
1138 	start = crashk_res.start;
1139 	end = crashk_res.end;
1140 	old_size = (end == 0) ? 0 : end - start + 1;
1141 	if (new_size >= old_size) {
1142 		ret = (new_size == old_size) ? 0 : -EINVAL;
1143 		goto unlock;
1144 	}
1145 
1146 	ram_res = kzalloc(sizeof(*ram_res), GFP_KERNEL);
1147 	if (!ram_res) {
1148 		ret = -ENOMEM;
1149 		goto unlock;
1150 	}
1151 
1152 	start = roundup(start, KEXEC_CRASH_MEM_ALIGN);
1153 	end = roundup(start + new_size, KEXEC_CRASH_MEM_ALIGN);
1154 
1155 	crash_map_reserved_pages();
1156 	crash_free_reserved_phys_range(end, crashk_res.end);
1157 
1158 	if ((start == end) && (crashk_res.parent != NULL))
1159 		release_resource(&crashk_res);
1160 
1161 	ram_res->start = end;
1162 	ram_res->end = crashk_res.end;
1163 	ram_res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
1164 	ram_res->name = "System RAM";
1165 
1166 	crashk_res.end = end - 1;
1167 
1168 	insert_resource(&iomem_resource, ram_res);
1169 	crash_unmap_reserved_pages();
1170 
1171 unlock:
1172 	mutex_unlock(&kexec_mutex);
1173 	return ret;
1174 }
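
/*
 * This helper backs writes to /sys/kernel/kexec_crash_size, e.g.
 *
 *	echo 16777216 > /sys/kernel/kexec_crash_size
 *
 * shrinks the reservation to 16M and hands the freed tail back to
 * System RAM.  Growing the region is not supported, as the size
 * check above shows.
 */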
1175 
1176 static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data,
1177 			    size_t data_len)
1178 {
1179 	struct elf_note note;
1180 
1181 	note.n_namesz = strlen(name) + 1;
1182 	note.n_descsz = data_len;
1183 	note.n_type   = type;
1184 	memcpy(buf, &note, sizeof(note));
1185 	buf += (sizeof(note) + 3)/4;
1186 	memcpy(buf, name, note.n_namesz);
1187 	buf += (note.n_namesz + 3)/4;
1188 	memcpy(buf, data, note.n_descsz);
1189 	buf += (note.n_descsz + 3)/4;
1190 
1191 	return buf;
1192 }
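
/*
 * Layout of the resulting note, per the standard ELF note format;
 * name and desc are each padded up to a 4-byte boundary:
 *
 *	+--------+--------+------+---~----+---~----+
 *	| namesz | descsz | type | name.. | desc.. |
 *	+--------+--------+------+---~----+---~----+
 *	   u32      u32     u32    padded   padded
 */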
1193 
1194 static void final_note(u32 *buf)
1195 {
1196 	struct elf_note note;
1197 
1198 	note.n_namesz = 0;
1199 	note.n_descsz = 0;
1200 	note.n_type   = 0;
1201 	memcpy(buf, &note, sizeof(note));
1202 }
1203 
1204 void crash_save_cpu(struct pt_regs *regs, int cpu)
1205 {
1206 	struct elf_prstatus prstatus;
1207 	u32 *buf;
1208 
1209 	if ((cpu < 0) || (cpu >= nr_cpu_ids))
1210 		return;
1211 
1212 	/* Using ELF notes here is opportunistic.
1213 	 * I need a well-defined structure format
1214 	 * for the data I pass, and I need tags
1215 	 * on the data to indicate what information I have
1216 	 * squirrelled away.  ELF notes happen to provide
1217 	 * all of that, so there is no need to invent something new.
1218 	 */
1219 	buf = (u32*)per_cpu_ptr(crash_notes, cpu);
1220 	if (!buf)
1221 		return;
1222 	memset(&prstatus, 0, sizeof(prstatus));
1223 	prstatus.pr_pid = current->pid;
1224 	elf_core_copy_kernel_regs(&prstatus.pr_reg, regs);
1225 	buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
1226 			      &prstatus, sizeof(prstatus));
1227 	final_note(buf);
1228 }
1229 
1230 static int __init crash_notes_memory_init(void)
1231 {
1232 	/* Allocate memory for saving cpu registers. */
1233 	crash_notes = alloc_percpu(note_buf_t);
1234 	if (!crash_notes) {
1235 		printk(KERN_ERR "Kexec: Memory allocation for saving cpu"
1236 		       " register states failed\n");
1237 		return -ENOMEM;
1238 	}
1239 	return 0;
1240 }
1241 module_init(crash_notes_memory_init)
1242 
1243 
1244 /*
1245  * parsing the "crashkernel" commandline
1246  *
1247  * this code is intended to be called from architecture specific code
1248  */
1249 
1250 
1251 /*
1252  * This function parses command lines in the format
1253  *
1254  *   crashkernel=ramsize-range:size[,...][@offset]
1255  *
1256  * The function returns 0 on success and -EINVAL on failure.
1257  */
1258 static int __init parse_crashkernel_mem(char 			*cmdline,
1259 					unsigned long long	system_ram,
1260 					unsigned long long	*crash_size,
1261 					unsigned long long	*crash_base)
1262 {
1263 	char *cur = cmdline, *tmp;
1264 
1265 	/* for each entry of the comma-separated list */
1266 	do {
1267 		unsigned long long start, end = ULLONG_MAX, size;
1268 
1269 		/* get the start of the range */
1270 		start = memparse(cur, &tmp);
1271 		if (cur == tmp) {
1272 			pr_warning("crashkernel: Memory value expected\n");
1273 			return -EINVAL;
1274 		}
1275 		cur = tmp;
1276 		if (*cur != '-') {
1277 			pr_warning("crashkernel: '-' expected\n");
1278 			return -EINVAL;
1279 		}
1280 		cur++;
1281 
1282 		/* if no ':' is here, then we read the end */
1283 		if (*cur != ':') {
1284 			end = memparse(cur, &tmp);
1285 			if (cur == tmp) {
1286 				pr_warning("crashkernel: Memory "
1287 						"value expected\n");
1288 				return -EINVAL;
1289 			}
1290 			cur = tmp;
1291 			if (end <= start) {
1292 				pr_warning("crashkernel: end <= start\n");
1293 				return -EINVAL;
1294 			}
1295 		}
1296 
1297 		if (*cur != ':') {
1298 			pr_warning("crashkernel: ':' expected\n");
1299 			return -EINVAL;
1300 		}
1301 		cur++;
1302 
1303 		size = memparse(cur, &tmp);
1304 		if (cur == tmp) {
1305 			pr_warning("Memory value expected\n");
1306 			return -EINVAL;
1307 		}
1308 		cur = tmp;
1309 		if (size >= system_ram) {
1310 			pr_warning("crashkernel: invalid size\n");
1311 			return -EINVAL;
1312 		}
1313 
1314 		/* match ? */
1315 		if (system_ram >= start && system_ram < end) {
1316 			*crash_size = size;
1317 			break;
1318 		}
1319 	} while (*cur++ == ',');
1320 
1321 	if (*crash_size > 0) {
1322 		while (*cur && *cur != ' ' && *cur != '@')
1323 			cur++;
1324 		if (*cur == '@') {
1325 			cur++;
1326 			*crash_base = memparse(cur, &tmp);
1327 			if (cur == tmp) {
1328 				pr_warning("Memory value expected "
1329 						"after '@'\n");
1330 				return -EINVAL;
1331 			}
1332 		}
1333 	}
1334 
1335 	return 0;
1336 }
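
/*
 * Example with illustrative values:
 *
 *	crashkernel=512M-2G:64M,2G-:128M@16M
 *
 * A machine with 1G of RAM falls in the 512M-2G range and reserves
 * 64M; a machine with 4G matches the open-ended 2G- range and
 * reserves 128M; the trailing @16M pins the reservation at physical
 * address 16M.
 */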
1337 
1338 /*
1339  * This function parses "simple" (old) crashkernel command lines like
1340  *
1341  * 	crashkernel=size[@offset]
1342  *
1343  * It returns 0 on success and -EINVAL on failure.
1344  */
1345 static int __init parse_crashkernel_simple(char 		*cmdline,
1346 					   unsigned long long 	*crash_size,
1347 					   unsigned long long 	*crash_base)
1348 {
1349 	char *cur = cmdline;
1350 
1351 	*crash_size = memparse(cmdline, &cur);
1352 	if (cmdline == cur) {
1353 		pr_warning("crashkernel: memory value expected\n");
1354 		return -EINVAL;
1355 	}
1356 
1357 	if (*cur == '@')
1358 		*crash_base = memparse(cur+1, &cur);
1359 	else if (*cur != ' ' && *cur != '\0') {
1360 		pr_warning("crashkernel: unrecognized char\n");
1361 		return -EINVAL;
1362 	}
1363 
1364 	return 0;
1365 }
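
/*
 * Example: "crashkernel=64M@16M" reserves 64M starting at physical
 * address 16M; without "@offset" the base is left at 0 and the
 * architecture code picks a suitable location itself.
 */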
1366 
1367 #define SUFFIX_HIGH 0
1368 #define SUFFIX_LOW  1
1369 #define SUFFIX_NULL 2
1370 static __initdata char *suffix_tbl[] = {
1371 	[SUFFIX_HIGH] = ",high",
1372 	[SUFFIX_LOW]  = ",low",
1373 	[SUFFIX_NULL] = NULL,
1374 };
1375 
1376 /*
1377  * This function parses "suffix" crashkernel command lines like
1378  *
1379  *	crashkernel=size,[high|low]
1380  *
1381  * It returns 0 on success and -EINVAL on failure.
1382  */
1383 static int __init parse_crashkernel_suffix(char *cmdline,
1384 					   unsigned long long	*crash_size,
1385 					   unsigned long long	*crash_base,
1386 					   const char *suffix)
1387 {
1388 	char *cur = cmdline;
1389 
1390 	*crash_size = memparse(cmdline, &cur);
1391 	if (cmdline == cur) {
1392 		pr_warn("crashkernel: memory value expected\n");
1393 		return -EINVAL;
1394 	}
1395 
1396 	/* check with suffix */
1397 	if (strncmp(cur, suffix, strlen(suffix))) {
1398 		pr_warn("crashkernel: unrecognized char\n");
1399 		return -EINVAL;
1400 	}
1401 	cur += strlen(suffix);
1402 	if (*cur != ' ' && *cur != '\0') {
1403 		pr_warn("crashkernel: unrecognized char\n");
1404 		return -EINVAL;
1405 	}
1406 
1407 	return 0;
1408 }
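
/*
 * Example: "crashkernel=256M,high" asks for a 256M reservation that
 * may be placed above 4G; the ",low" variant requests the companion
 * region below 4G for devices that need 32-bit DMA.  Placement policy
 * is up to the architecture code that calls these parsers.
 */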
1409 
1410 static __init char *get_last_crashkernel(char *cmdline,
1411 			     const char *name,
1412 			     const char *suffix)
1413 {
1414 	char *p = cmdline, *ck_cmdline = NULL;
1415 
1416 	/* find crashkernel and use the last one if there are more */
1417 	p = strstr(p, name);
1418 	while (p) {
1419 		char *end_p = strchr(p, ' ');
1420 		char *q;
1421 
1422 		if (!end_p)
1423 			end_p = p + strlen(p);
1424 
1425 		if (!suffix) {
1426 			int i;
1427 
1428 			/* skip the one with any known suffix */
1429 			for (i = 0; suffix_tbl[i]; i++) {
1430 				q = end_p - strlen(suffix_tbl[i]);
1431 				if (!strncmp(q, suffix_tbl[i],
1432 					     strlen(suffix_tbl[i])))
1433 					goto next;
1434 			}
1435 			ck_cmdline = p;
1436 		} else {
1437 			q = end_p - strlen(suffix);
1438 			if (!strncmp(q, suffix, strlen(suffix)))
1439 				ck_cmdline = p;
1440 		}
1441 next:
1442 		p = strstr(p+1, name);
1443 	}
1444 
1445 	if (!ck_cmdline)
1446 		return NULL;
1447 
1448 	return ck_cmdline;
1449 }
1450 
1451 static int __init __parse_crashkernel(char *cmdline,
1452 			     unsigned long long system_ram,
1453 			     unsigned long long *crash_size,
1454 			     unsigned long long *crash_base,
1455 			     const char *name,
1456 			     const char *suffix)
1457 {
1458 	char	*first_colon, *first_space;
1459 	char	*ck_cmdline;
1460 
1461 	BUG_ON(!crash_size || !crash_base);
1462 	*crash_size = 0;
1463 	*crash_base = 0;
1464 
1465 	ck_cmdline = get_last_crashkernel(cmdline, name, suffix);
1466 
1467 	if (!ck_cmdline)
1468 		return -EINVAL;
1469 
1470 	ck_cmdline += strlen(name);
1471 
1472 	if (suffix)
1473 		return parse_crashkernel_suffix(ck_cmdline, crash_size,
1474 				crash_base, suffix);
1475 	/*
1476 	 * if the commandline contains a ':', then that's the extended
1477 	 * syntax -- if not, it must be the classic syntax
1478 	 */
1479 	first_colon = strchr(ck_cmdline, ':');
1480 	first_space = strchr(ck_cmdline, ' ');
1481 	if (first_colon && (!first_space || first_colon < first_space))
1482 		return parse_crashkernel_mem(ck_cmdline, system_ram,
1483 				crash_size, crash_base);
1484 	else
1485 		return parse_crashkernel_simple(ck_cmdline, crash_size,
1486 				crash_base);
1487 
1488 	return 0;
1489 }
1490 
1491 /*
1492  * This function is the entry point for command line parsing and should be
1493  * called from the arch-specific code.
1494  */
1495 int __init parse_crashkernel(char *cmdline,
1496 			     unsigned long long system_ram,
1497 			     unsigned long long *crash_size,
1498 			     unsigned long long *crash_base)
1499 {
1500 	return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
1501 					"crashkernel=", NULL);
1502 }
1503 
1504 int __init parse_crashkernel_high(char *cmdline,
1505 			     unsigned long long system_ram,
1506 			     unsigned long long *crash_size,
1507 			     unsigned long long *crash_base)
1508 {
1509 	return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
1510 				"crashkernel=", suffix_tbl[SUFFIX_HIGH]);
1511 }
1512 
1513 int __init parse_crashkernel_low(char *cmdline,
1514 			     unsigned long long system_ram,
1515 			     unsigned long long *crash_size,
1516 			     unsigned long long *crash_base)
1517 {
1518 	return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base,
1519 				"crashkernel=", suffix_tbl[SUFFIX_LOW]);
1520 }
1521 
1522 static void update_vmcoreinfo_note(void)
1523 {
1524 	u32 *buf = vmcoreinfo_note;
1525 
1526 	if (!vmcoreinfo_size)
1527 		return;
1528 	buf = append_elf_note(buf, VMCOREINFO_NOTE_NAME, 0, vmcoreinfo_data,
1529 			      vmcoreinfo_size);
1530 	final_note(buf);
1531 }
1532 
1533 void crash_save_vmcoreinfo(void)
1534 {
1535 	vmcoreinfo_append_str("CRASHTIME=%ld\n", get_seconds());
1536 	update_vmcoreinfo_note();
1537 }
1538 
1539 void vmcoreinfo_append_str(const char *fmt, ...)
1540 {
1541 	va_list args;
1542 	char buf[0x50];
1543 	int r;
1544 
1545 	va_start(args, fmt);
1546 	r = vsnprintf(buf, sizeof(buf), fmt, args);
1547 	va_end(args);
1548 
1549 	if (r + vmcoreinfo_size > vmcoreinfo_max_size)
1550 		r = vmcoreinfo_max_size - vmcoreinfo_size;
1551 
1552 	memcpy(&vmcoreinfo_data[vmcoreinfo_size], buf, r);
1553 
1554 	vmcoreinfo_size += r;
1555 }
1556 
1557 /*
1558  * Provide an empty default implementation here -- architecture
1559  * code may override this
1560  */
1561 void __weak arch_crash_save_vmcoreinfo(void)
1562 {}
1563 
1564 unsigned long __weak paddr_vmcoreinfo_note(void)
1565 {
1566 	return __pa((unsigned long)(char *)&vmcoreinfo_note);
1567 }
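
/*
 * The physical address returned here is exported via
 * /sys/kernel/vmcoreinfo, which tools such as kexec-tools read so the
 * dump kernel can locate this ELF note in the old kernel's memory.
 */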
1568 
1569 static int __init crash_save_vmcoreinfo_init(void)
1570 {
1571 	VMCOREINFO_OSRELEASE(init_uts_ns.name.release);
1572 	VMCOREINFO_PAGESIZE(PAGE_SIZE);
1573 
1574 	VMCOREINFO_SYMBOL(init_uts_ns);
1575 	VMCOREINFO_SYMBOL(node_online_map);
1576 #ifdef CONFIG_MMU
1577 	VMCOREINFO_SYMBOL(swapper_pg_dir);
1578 #endif
1579 	VMCOREINFO_SYMBOL(_stext);
1580 	VMCOREINFO_SYMBOL(vmap_area_list);
1581 
1582 #ifndef CONFIG_NEED_MULTIPLE_NODES
1583 	VMCOREINFO_SYMBOL(mem_map);
1584 	VMCOREINFO_SYMBOL(contig_page_data);
1585 #endif
1586 #ifdef CONFIG_SPARSEMEM
1587 	VMCOREINFO_SYMBOL(mem_section);
1588 	VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
1589 	VMCOREINFO_STRUCT_SIZE(mem_section);
1590 	VMCOREINFO_OFFSET(mem_section, section_mem_map);
1591 #endif
1592 	VMCOREINFO_STRUCT_SIZE(page);
1593 	VMCOREINFO_STRUCT_SIZE(pglist_data);
1594 	VMCOREINFO_STRUCT_SIZE(zone);
1595 	VMCOREINFO_STRUCT_SIZE(free_area);
1596 	VMCOREINFO_STRUCT_SIZE(list_head);
1597 	VMCOREINFO_SIZE(nodemask_t);
1598 	VMCOREINFO_OFFSET(page, flags);
1599 	VMCOREINFO_OFFSET(page, _count);
1600 	VMCOREINFO_OFFSET(page, mapping);
1601 	VMCOREINFO_OFFSET(page, lru);
1602 	VMCOREINFO_OFFSET(page, _mapcount);
1603 	VMCOREINFO_OFFSET(page, private);
1604 	VMCOREINFO_OFFSET(pglist_data, node_zones);
1605 	VMCOREINFO_OFFSET(pglist_data, nr_zones);
1606 #ifdef CONFIG_FLAT_NODE_MEM_MAP
1607 	VMCOREINFO_OFFSET(pglist_data, node_mem_map);
1608 #endif
1609 	VMCOREINFO_OFFSET(pglist_data, node_start_pfn);
1610 	VMCOREINFO_OFFSET(pglist_data, node_spanned_pages);
1611 	VMCOREINFO_OFFSET(pglist_data, node_id);
1612 	VMCOREINFO_OFFSET(zone, free_area);
1613 	VMCOREINFO_OFFSET(zone, vm_stat);
1614 	VMCOREINFO_OFFSET(zone, spanned_pages);
1615 	VMCOREINFO_OFFSET(free_area, free_list);
1616 	VMCOREINFO_OFFSET(list_head, next);
1617 	VMCOREINFO_OFFSET(list_head, prev);
1618 	VMCOREINFO_OFFSET(vmap_area, va_start);
1619 	VMCOREINFO_OFFSET(vmap_area, list);
1620 	VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER);
1621 	log_buf_kexec_setup();
1622 	VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES);
1623 	VMCOREINFO_NUMBER(NR_FREE_PAGES);
1624 	VMCOREINFO_NUMBER(PG_lru);
1625 	VMCOREINFO_NUMBER(PG_private);
1626 	VMCOREINFO_NUMBER(PG_swapcache);
1627 	VMCOREINFO_NUMBER(PG_slab);
1628 #ifdef CONFIG_MEMORY_FAILURE
1629 	VMCOREINFO_NUMBER(PG_hwpoison);
1630 #endif
1631 	VMCOREINFO_NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE);
1632 
1633 	arch_crash_save_vmcoreinfo();
1634 	update_vmcoreinfo_note();
1635 
1636 	return 0;
1637 }
1638 
1639 module_init(crash_save_vmcoreinfo_init)
1640 
1641 /*
1642  * Move into place and start executing a preloaded standalone
1643  * executable.  If nothing was preloaded return an error.
1644  */
1645 int kernel_kexec(void)
1646 {
1647 	int error = 0;
1648 
1649 	if (!mutex_trylock(&kexec_mutex))
1650 		return -EBUSY;
1651 	if (!kexec_image) {
1652 		error = -EINVAL;
1653 		goto Unlock;
1654 	}
1655 
1656 #ifdef CONFIG_KEXEC_JUMP
1657 	if (kexec_image->preserve_context) {
1658 		lock_system_sleep();
1659 		pm_prepare_console();
1660 		error = freeze_processes();
1661 		if (error) {
1662 			error = -EBUSY;
1663 			goto Restore_console;
1664 		}
1665 		suspend_console();
1666 		error = dpm_suspend_start(PMSG_FREEZE);
1667 		if (error)
1668 			goto Resume_console;
1669 		/* At this point, dpm_suspend_start() has been called,
1670 		 * but *not* dpm_suspend_end(). We *must* call
1671 		 * dpm_suspend_end() now.  Otherwise, drivers for
1672 		 * some devices (e.g. interrupt controllers) become
1673 		 * desynchronized with the actual state of the
1674 		 * hardware at resume time, and evil weirdness ensues.
1675 		 */
1676 		error = dpm_suspend_end(PMSG_FREEZE);
1677 		if (error)
1678 			goto Resume_devices;
1679 		error = disable_nonboot_cpus();
1680 		if (error)
1681 			goto Enable_cpus;
1682 		local_irq_disable();
1683 		error = syscore_suspend();
1684 		if (error)
1685 			goto Enable_irqs;
1686 	} else
1687 #endif
1688 	{
1689 		kernel_restart_prepare(NULL);
1690 		printk(KERN_EMERG "Starting new kernel\n");
1691 		machine_shutdown();
1692 	}
1693 
1694 	machine_kexec(kexec_image);
1695 
1696 #ifdef CONFIG_KEXEC_JUMP
1697 	if (kexec_image->preserve_context) {
1698 		syscore_resume();
1699  Enable_irqs:
1700 		local_irq_enable();
1701  Enable_cpus:
1702 		enable_nonboot_cpus();
1703 		dpm_resume_start(PMSG_RESTORE);
1704  Resume_devices:
1705 		dpm_resume_end(PMSG_RESTORE);
1706  Resume_console:
1707 		resume_console();
1708 		thaw_processes();
1709  Restore_console:
1710 		pm_restore_console();
1711 		unlock_system_sleep();
1712 	}
1713 #endif
1714 
1715  Unlock:
1716 	mutex_unlock(&kexec_mutex);
1717 	return error;
1718 }
1719