1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *	fs/proc/vmcore.c Interface for accessing the crash
4  * 				 dump from the system's previous life.
5  * 	Heavily borrowed from fs/proc/kcore.c
6  *	Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
7  *	Copyright (C) IBM Corporation, 2004. All rights reserved
8  *
9  */
10 
11 #include <linux/mm.h>
12 #include <linux/kcore.h>
13 #include <linux/user.h>
14 #include <linux/elf.h>
15 #include <linux/elfcore.h>
16 #include <linux/export.h>
17 #include <linux/slab.h>
18 #include <linux/highmem.h>
19 #include <linux/printk.h>
20 #include <linux/memblock.h>
21 #include <linux/init.h>
22 #include <linux/crash_dump.h>
23 #include <linux/list.h>
24 #include <linux/moduleparam.h>
25 #include <linux/mutex.h>
26 #include <linux/vmalloc.h>
27 #include <linux/pagemap.h>
28 #include <linux/uaccess.h>
29 #include <linux/cc_platform.h>
30 #include <asm/io.h>
31 #include "internal.h"
32 
33 /* List representing chunks of contiguous memory areas and their offsets in
34  * the vmcore file.
35  */
36 static LIST_HEAD(vmcore_list);
37 
38 /* Stores the pointer to the buffer containing kernel elf core headers. */
39 static char *elfcorebuf;
40 static size_t elfcorebuf_sz;
41 static size_t elfcorebuf_sz_orig;
42 
43 static char *elfnotes_buf;
44 static size_t elfnotes_sz;
45 /* Size of all notes minus the device dump notes */
46 static size_t elfnotes_orig_sz;
47 
48 /* Total size of vmcore file. */
49 static u64 vmcore_size;
50 
51 static struct proc_dir_entry *proc_vmcore;
52 
53 #ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
54 /* Device Dump list and mutex to synchronize access to the list */
55 static LIST_HEAD(vmcoredd_list);
56 static DEFINE_MUTEX(vmcoredd_mutex);
57 
58 static bool vmcoredd_disabled;
59 core_param(novmcoredd, vmcoredd_disabled, bool, 0);
60 #endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
61 
62 /* Device Dump Size */
63 static size_t vmcoredd_orig_sz;
64 
65 static DECLARE_RWSEM(vmcore_cb_rwsem);
66 /* List of registered vmcore callbacks. */
67 static LIST_HEAD(vmcore_cb_list);
68 /* Whether the vmcore has been opened once. */
69 static bool vmcore_opened;
70 
71 void register_vmcore_cb(struct vmcore_cb *cb)
72 {
73 	down_write(&vmcore_cb_rwsem);
74 	INIT_LIST_HEAD(&cb->next);
75 	list_add_tail(&cb->next, &vmcore_cb_list);
76 	/*
77 	 * Registering a vmcore callback after the vmcore was opened is
78 	 * very unusual (e.g., manual driver loading).
79 	 */
80 	if (vmcore_opened)
81 		pr_warn_once("Unexpected vmcore callback registration\n");
82 	up_write(&vmcore_cb_rwsem);
83 }
84 EXPORT_SYMBOL_GPL(register_vmcore_cb);
85 
86 void unregister_vmcore_cb(struct vmcore_cb *cb)
87 {
88 	down_write(&vmcore_cb_rwsem);
89 	list_del(&cb->next);
90 	/*
91 	 * Unregistering a vmcore callback after the vmcore was opened is
92 	 * very unusual (e.g., forced driver removal), but we cannot stop
93 	 * unregistering.
94 	 */
95 	if (vmcore_opened)
96 		pr_warn_once("Unexpected vmcore callback unregistration\n");
97 	up_write(&vmcore_cb_rwsem);
98 }
99 EXPORT_SYMBOL_GPL(unregister_vmcore_cb);
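/*
 * Example (an illustrative sketch, not part of this file): a driver that
 * knows some pfns of the crashed kernel's memory hold no dumpable content
 * can filter them out of the dump. All "mydrv" names below are
 * hypothetical.
 *
 *	static bool mydrv_pfn_is_ram(struct vmcore_cb *cb, unsigned long pfn)
 *	{
 *		return !mydrv_pfn_is_reserved(pfn);	// hypothetical helper
 *	}
 *
 *	static struct vmcore_cb mydrv_vmcore_cb = {
 *		.pfn_is_ram = mydrv_pfn_is_ram,
 *	};
 *
 *	register_vmcore_cb(&mydrv_vmcore_cb);	// e.g. from module init
 *	...
 *	unregister_vmcore_cb(&mydrv_vmcore_cb);	// e.g. from module exit
 */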
100 
101 static bool pfn_is_ram(unsigned long pfn)
102 {
103 	struct vmcore_cb *cb;
104 	bool ret = true;
105 
106 	lockdep_assert_held_read(&vmcore_cb_rwsem);
107 
108 	list_for_each_entry(cb, &vmcore_cb_list, next) {
109 		if (unlikely(!cb->pfn_is_ram))
110 			continue;
111 		ret = cb->pfn_is_ram(cb, pfn);
112 		if (!ret)
113 			break;
114 	}
115 
116 	return ret;
117 }
118 
119 static int open_vmcore(struct inode *inode, struct file *file)
120 {
121 	down_read(&vmcore_cb_rwsem);
122 	vmcore_opened = true;
123 	up_read(&vmcore_cb_rwsem);
124 
125 	return 0;
126 }
127 
128 /* Read from the oldmem device at the given offset, one page at a time. */
129 ssize_t read_from_oldmem(char *buf, size_t count,
130 			 u64 *ppos, int userbuf,
131 			 bool encrypted)
132 {
133 	unsigned long pfn, offset;
134 	size_t nr_bytes;
135 	ssize_t read = 0, tmp;
136 
137 	if (!count)
138 		return 0;
139 
140 	offset = (unsigned long)(*ppos % PAGE_SIZE);
141 	pfn = (unsigned long)(*ppos / PAGE_SIZE);
142 
143 	down_read(&vmcore_cb_rwsem);
144 	do {
145 		if (count > (PAGE_SIZE - offset))
146 			nr_bytes = PAGE_SIZE - offset;
147 		else
148 			nr_bytes = count;
149 
150 		/* If pfn is not ram, return zeros for sparse dump files */
151 		if (!pfn_is_ram(pfn)) {
152 			tmp = 0;
153 			if (!userbuf)
154 				memset(buf, 0, nr_bytes);
155 			else if (clear_user(buf, nr_bytes))
156 				tmp = -EFAULT;
157 		} else {
158 			if (encrypted)
159 				tmp = copy_oldmem_page_encrypted(pfn, buf,
160 								 nr_bytes,
161 								 offset,
162 								 userbuf);
163 			else
164 				tmp = copy_oldmem_page(pfn, buf, nr_bytes,
165 						       offset, userbuf);
166 		}
167 		if (tmp < 0) {
168 			up_read(&vmcore_cb_rwsem);
169 			return tmp;
170 		}
171 
172 		*ppos += nr_bytes;
173 		count -= nr_bytes;
174 		buf += nr_bytes;
175 		read += nr_bytes;
176 		++pfn;
177 		offset = 0;
178 	} while (count);
179 
180 	up_read(&vmcore_cb_rwsem);
181 	return read;
182 }
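/*
 * Worked example (illustrative): with PAGE_SIZE == 4096, a read of
 * count == 0x2000 bytes at *ppos == 0x1234 starts at pfn 0x1 with
 * offset 0x234. The first iteration copies PAGE_SIZE - 0x234 bytes;
 * each following iteration starts at offset 0 of the next pfn until
 * count is exhausted.
 */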
183 
184 /*
185  * Architectures may override this function to allocate the ELF header in the 2nd kernel
186  */
187 int __weak elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
188 {
189 	return 0;
190 }
191 
192 /*
193  * Architectures may override this function to free the ELF header
194  */
195 void __weak elfcorehdr_free(unsigned long long addr)
196 {}
197 
198 /*
199  * Architectures may override this function to read from the ELF header
200  */
201 ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
202 {
203 	return read_from_oldmem(buf, count, ppos, 0, false);
204 }
205 
206 /*
207  * Architectures may override this function to read from the ELF note segments
208  */
209 ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
210 {
211 	return read_from_oldmem(buf, count, ppos, 0, cc_platform_has(CC_ATTR_MEM_ENCRYPT));
212 }
213 
214 /*
215  * Architectures may override this function to map oldmem
216  */
217 int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
218 				  unsigned long from, unsigned long pfn,
219 				  unsigned long size, pgprot_t prot)
220 {
221 	prot = pgprot_encrypted(prot);
222 	return remap_pfn_range(vma, from, pfn, size, prot);
223 }
224 
225 /*
226  * Architectures which support memory encryption override this.
227  */
228 ssize_t __weak
229 copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize,
230 			   unsigned long offset, int userbuf)
231 {
232 	return copy_oldmem_page(pfn, buf, csize, offset, userbuf);
233 }
234 
235 /*
236  * Copy to either kernel or user space
237  */
238 static int copy_to(void *target, void *src, size_t size, int userbuf)
239 {
240 	if (userbuf) {
241 		if (copy_to_user((char __user *) target, src, size))
242 			return -EFAULT;
243 	} else {
244 		memcpy(target, src, size);
245 	}
246 	return 0;
247 }
248 
249 #ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
250 static int vmcoredd_copy_dumps(void *dst, u64 start, size_t size, int userbuf)
251 {
252 	struct vmcoredd_node *dump;
253 	u64 offset = 0;
254 	int ret = 0;
255 	size_t tsz;
256 	char *buf;
257 
258 	mutex_lock(&vmcoredd_mutex);
259 	list_for_each_entry(dump, &vmcoredd_list, list) {
260 		if (start < offset + dump->size) {
261 			tsz = min(offset + (u64)dump->size - start, (u64)size);
262 			buf = dump->buf + start - offset;
263 			if (copy_to(dst, buf, tsz, userbuf)) {
264 				ret = -EFAULT;
265 				goto out_unlock;
266 			}
267 
268 			size -= tsz;
269 			start += tsz;
270 			dst += tsz;
271 
272 			/* Leave now if the buffer is already filled */
273 			if (!size)
274 				goto out_unlock;
275 		}
276 		offset += dump->size;
277 	}
278 
279 out_unlock:
280 	mutex_unlock(&vmcoredd_mutex);
281 	return ret;
282 }
283 
284 #ifdef CONFIG_MMU
285 static int vmcoredd_mmap_dumps(struct vm_area_struct *vma, unsigned long dst,
286 			       u64 start, size_t size)
287 {
288 	struct vmcoredd_node *dump;
289 	u64 offset = 0;
290 	int ret = 0;
291 	size_t tsz;
292 	char *buf;
293 
294 	mutex_lock(&vmcoredd_mutex);
295 	list_for_each_entry(dump, &vmcoredd_list, list) {
296 		if (start < offset + dump->size) {
297 			tsz = min(offset + (u64)dump->size - start, (u64)size);
298 			buf = dump->buf + start - offset;
299 			if (remap_vmalloc_range_partial(vma, dst, buf, 0,
300 							tsz)) {
301 				ret = -EFAULT;
302 				goto out_unlock;
303 			}
304 
305 			size -= tsz;
306 			start += tsz;
307 			dst += tsz;
308 
309 			/* Leave now if the buffer is already filled */
310 			if (!size)
311 				goto out_unlock;
312 		}
313 		offset += dump->size;
314 	}
315 
316 out_unlock:
317 	mutex_unlock(&vmcoredd_mutex);
318 	return ret;
319 }
320 #endif /* CONFIG_MMU */
321 #endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
322 
323 /* Read from the ELF header and then the crash dump. On error, a negative
324  * value is returned; otherwise, the number of bytes read is returned.
325  */
326 static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
327 			     int userbuf)
328 {
329 	ssize_t acc = 0, tmp;
330 	size_t tsz;
331 	u64 start;
332 	struct vmcore *m = NULL;
333 
334 	if (buflen == 0 || *fpos >= vmcore_size)
335 		return 0;
336 
337 	/* trim buflen to not go beyond EOF */
338 	if (buflen > vmcore_size - *fpos)
339 		buflen = vmcore_size - *fpos;
340 
341 	/* Read ELF core header */
342 	if (*fpos < elfcorebuf_sz) {
343 		tsz = min(elfcorebuf_sz - (size_t)*fpos, buflen);
344 		if (copy_to(buffer, elfcorebuf + *fpos, tsz, userbuf))
345 			return -EFAULT;
346 		buflen -= tsz;
347 		*fpos += tsz;
348 		buffer += tsz;
349 		acc += tsz;
350 
351 		/* leave now if the buffer is already filled */
352 		if (buflen == 0)
353 			return acc;
354 	}
355 
356 	/* Read Elf note segment */
357 	if (*fpos < elfcorebuf_sz + elfnotes_sz) {
358 		void *kaddr;
359 
360 		/* We add device dumps before other elf notes because the
361 		 * other elf notes may not fill the elf notes buffer
362 		 * completely and we will end up with zero-filled data
363 		 * between the elf notes and the device dumps. Tools will
364 		 * then try to decode this zero-filled data as valid notes
365 		 * and we don't want that. Hence, adding device dumps before
366 		 * the other elf notes ensures that the zero-filled data is
367 		 * avoided.
368 		 */
369 #ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
370 		/* Read device dumps */
371 		if (*fpos < elfcorebuf_sz + vmcoredd_orig_sz) {
372 			tsz = min(elfcorebuf_sz + vmcoredd_orig_sz -
373 				  (size_t)*fpos, buflen);
374 			start = *fpos - elfcorebuf_sz;
375 			if (vmcoredd_copy_dumps(buffer, start, tsz, userbuf))
376 				return -EFAULT;
377 
378 			buflen -= tsz;
379 			*fpos += tsz;
380 			buffer += tsz;
381 			acc += tsz;
382 
383 			/* leave now if the buffer is already filled */
384 			if (!buflen)
385 				return acc;
386 		}
387 #endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
388 
389 		/* Read remaining elf notes */
390 		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)*fpos, buflen);
391 		kaddr = elfnotes_buf + *fpos - elfcorebuf_sz - vmcoredd_orig_sz;
392 		if (copy_to(buffer, kaddr, tsz, userbuf))
393 			return -EFAULT;
394 
395 		buflen -= tsz;
396 		*fpos += tsz;
397 		buffer += tsz;
398 		acc += tsz;
399 
400 		/* leave now if the buffer is already filled */
401 		if (buflen == 0)
402 			return acc;
403 	}
404 
405 	list_for_each_entry(m, &vmcore_list, list) {
406 		if (*fpos < m->offset + m->size) {
407 			tsz = (size_t)min_t(unsigned long long,
408 					    m->offset + m->size - *fpos,
409 					    buflen);
410 			start = m->paddr + *fpos - m->offset;
411 			tmp = read_from_oldmem(buffer, tsz, &start,
412 					       userbuf, cc_platform_has(CC_ATTR_MEM_ENCRYPT));
413 			if (tmp < 0)
414 				return tmp;
415 			buflen -= tsz;
416 			*fpos += tsz;
417 			buffer += tsz;
418 			acc += tsz;
419 
420 			/* leave now if the buffer is already filled */
421 			if (buflen == 0)
422 				return acc;
423 		}
424 	}
425 
426 	return acc;
427 }
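/*
 * For reference, the /proc/vmcore offsets handled above are laid out as
 * follows (the device dump region exists only with
 * CONFIG_PROC_VMCORE_DEVICE_DUMP):
 *
 *	0            elfcorebuf_sz   + vmcoredd_orig_sz  + elfnotes_sz
 *	+------------+---------------+-------------------+-------------- ...
 *	| ELF header | device dump   | merged ELF notes  | PT_LOAD data
 *	| + phdrs    | notes         |                   | (old memory)
 *	+------------+---------------+-------------------+-------------- ...
 *	 (elfcorebuf)  (vmcoredd_list)  (elfnotes_buf)      (vmcore_list)
 */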
428 
429 static ssize_t read_vmcore(struct file *file, char __user *buffer,
430 			   size_t buflen, loff_t *fpos)
431 {
432 	return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
433 }
434 
435 /*
436  * The vmcore fault handler uses the page cache and fills data using the
437  * standard __read_vmcore() function.
438  *
439  * On s390 the fault handler is used for memory regions that can't be mapped
440  * directly with remap_pfn_range().
441  */
442 static vm_fault_t mmap_vmcore_fault(struct vm_fault *vmf)
443 {
444 #ifdef CONFIG_S390
445 	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
446 	pgoff_t index = vmf->pgoff;
447 	struct page *page;
448 	loff_t offset;
449 	char *buf;
450 	int rc;
451 
452 	page = find_or_create_page(mapping, index, GFP_KERNEL);
453 	if (!page)
454 		return VM_FAULT_OOM;
455 	if (!PageUptodate(page)) {
456 		offset = (loff_t) index << PAGE_SHIFT;
457 		buf = __va((page_to_pfn(page) << PAGE_SHIFT));
458 		rc = __read_vmcore(buf, PAGE_SIZE, &offset, 0);
459 		if (rc < 0) {
460 			unlock_page(page);
461 			put_page(page);
462 			return vmf_error(rc);
463 		}
464 		SetPageUptodate(page);
465 	}
466 	unlock_page(page);
467 	vmf->page = page;
468 	return 0;
469 #else
470 	return VM_FAULT_SIGBUS;
471 #endif
472 }
473 
474 static const struct vm_operations_struct vmcore_mmap_ops = {
475 	.fault = mmap_vmcore_fault,
476 };
477 
478 /**
479  * vmcore_alloc_buf - allocate buffer in vmalloc memory
480  * @size: size of buffer
481  *
482  * If CONFIG_MMU is defined, use vmalloc_user() to allow users to mmap
483  * the buffer to user-space by means of remap_vmalloc_range().
484  *
485  * If CONFIG_MMU is not defined, use vzalloc() since mmap_vmcore() is
486  * disabled and there's no need to allow users to mmap the buffer.
487  */
488 static inline char *vmcore_alloc_buf(size_t size)
489 {
490 #ifdef CONFIG_MMU
491 	return vmalloc_user(size);
492 #else
493 	return vzalloc(size);
494 #endif
495 }
496 
497 /*
498  * Disable mmap_vmcore() if CONFIG_MMU is not defined. MMU is
499  * essential for mmap_vmcore() in order to map physically
500  * non-contiguous objects (ELF header, ELF note segment and memory
501  * regions in the 1st kernel pointed to by PT_LOAD entries) into
502  * virtually contiguous user-space in ELF layout.
503  */
504 #ifdef CONFIG_MMU
505 /*
506  * remap_oldmem_pfn_checked - do remap_oldmem_pfn_range replacing all pages
507  * reported as not being ram with the zero page.
508  *
509  * @vma: vm_area_struct describing requested mapping
510  * @from: start remapping from
511  * @pfn: page frame number to start remapping to
512  * @size: remapping size
513  * @prot: protection bits
514  *
515  * Returns zero on success, -EAGAIN on failure.
516  */
517 static int remap_oldmem_pfn_checked(struct vm_area_struct *vma,
518 				    unsigned long from, unsigned long pfn,
519 				    unsigned long size, pgprot_t prot)
520 {
521 	unsigned long map_size;
522 	unsigned long pos_start, pos_end, pos;
523 	unsigned long zeropage_pfn = my_zero_pfn(0);
524 	size_t len = 0;
525 
526 	pos_start = pfn;
527 	pos_end = pfn + (size >> PAGE_SHIFT);
528 
529 	for (pos = pos_start; pos < pos_end; ++pos) {
530 		if (!pfn_is_ram(pos)) {
531 			/*
532 			 * We hit a page which is not ram. Remap the contiguous
533 			 * region between pos_start and pos-1 and replace
534 			 * the non-ram page at pos with the zero page.
535 			 */
536 			if (pos > pos_start) {
537 				/* Remap the contiguous region */
538 				map_size = (pos - pos_start) << PAGE_SHIFT;
539 				if (remap_oldmem_pfn_range(vma, from + len,
540 							   pos_start, map_size,
541 							   prot))
542 					goto fail;
543 				len += map_size;
544 			}
545 			/* Remap the zero page */
546 			if (remap_oldmem_pfn_range(vma, from + len,
547 						   zeropage_pfn,
548 						   PAGE_SIZE, prot))
549 				goto fail;
550 			len += PAGE_SIZE;
551 			pos_start = pos + 1;
552 		}
553 	}
554 	if (pos > pos_start) {
555 		/* Remap the rest */
556 		map_size = (pos - pos_start) << PAGE_SHIFT;
557 		if (remap_oldmem_pfn_range(vma, from + len, pos_start,
558 					   map_size, prot))
559 			goto fail;
560 	}
561 	return 0;
562 fail:
563 	do_munmap(vma->vm_mm, from, len, NULL);
564 	return -EAGAIN;
565 }
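/*
 * Worked example (illustrative): remapping pfns 0x100..0x103 where only
 * pfn 0x102 is reported as not ram results in three calls to
 * remap_oldmem_pfn_range(): pfns 0x100-0x101, the zero page in place of
 * 0x102, and finally pfn 0x103 via the "remap the rest" path.
 */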
566 
567 static int vmcore_remap_oldmem_pfn(struct vm_area_struct *vma,
568 			    unsigned long from, unsigned long pfn,
569 			    unsigned long size, pgprot_t prot)
570 {
571 	int ret;
572 
573 	/*
574 	 * Check if a pfn_is_ram callback was registered to avoid
575 	 * looping over all pages without a reason.
576 	 */
577 	down_read(&vmcore_cb_rwsem);
578 	if (!list_empty(&vmcore_cb_list))
579 		ret = remap_oldmem_pfn_checked(vma, from, pfn, size, prot);
580 	else
581 		ret = remap_oldmem_pfn_range(vma, from, pfn, size, prot);
582 	up_read(&vmcore_cb_rwsem);
583 	return ret;
584 }
585 
586 static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
587 {
588 	size_t size = vma->vm_end - vma->vm_start;
589 	u64 start, end, len, tsz;
590 	struct vmcore *m;
591 
592 	start = (u64)vma->vm_pgoff << PAGE_SHIFT;
593 	end = start + size;
594 
595 	if (size > vmcore_size || end > vmcore_size)
596 		return -EINVAL;
597 
598 	if (vma->vm_flags & (VM_WRITE | VM_EXEC))
599 		return -EPERM;
600 
601 	vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC);
602 	vma->vm_flags |= VM_MIXEDMAP;
603 	vma->vm_ops = &vmcore_mmap_ops;
604 
605 	len = 0;
606 
607 	if (start < elfcorebuf_sz) {
608 		u64 pfn;
609 
610 		tsz = min(elfcorebuf_sz - (size_t)start, size);
611 		pfn = __pa(elfcorebuf + start) >> PAGE_SHIFT;
612 		if (remap_pfn_range(vma, vma->vm_start, pfn, tsz,
613 				    vma->vm_page_prot))
614 			return -EAGAIN;
615 		size -= tsz;
616 		start += tsz;
617 		len += tsz;
618 
619 		if (size == 0)
620 			return 0;
621 	}
622 
623 	if (start < elfcorebuf_sz + elfnotes_sz) {
624 		void *kaddr;
625 
626 		/* We add device dumps before other elf notes because the
627 		 * other elf notes may not fill the elf notes buffer
628 		 * completely and we will end up with zero-filled data
629 		 * between the elf notes and the device dumps. Tools will
630 		 * then try to decode this zero-filled data as valid notes
631 		 * and we don't want that. Hence, adding device dumps before
632 		 * the other elf notes ensures that the zero-filled data is
633 		 * avoided. This also ensures that the device dumps and
634 		 * other elf notes can be properly mmapped at a page-aligned
635 		 * address.
636 		 */
637 #ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
638 		/* Read device dumps */
639 		if (start < elfcorebuf_sz + vmcoredd_orig_sz) {
640 			u64 start_off;
641 
642 			tsz = min(elfcorebuf_sz + vmcoredd_orig_sz -
643 				  (size_t)start, size);
644 			start_off = start - elfcorebuf_sz;
645 			if (vmcoredd_mmap_dumps(vma, vma->vm_start + len,
646 						start_off, tsz))
647 				goto fail;
648 
649 			size -= tsz;
650 			start += tsz;
651 			len += tsz;
652 
653 			/* leave now if the mapping is already fully populated */
654 			if (!size)
655 				return 0;
656 		}
657 #endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
658 
659 		/* Read remaining elf notes */
660 		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)start, size);
661 		kaddr = elfnotes_buf + start - elfcorebuf_sz - vmcoredd_orig_sz;
662 		if (remap_vmalloc_range_partial(vma, vma->vm_start + len,
663 						kaddr, 0, tsz))
664 			goto fail;
665 
666 		size -= tsz;
667 		start += tsz;
668 		len += tsz;
669 
670 		if (size == 0)
671 			return 0;
672 	}
673 
674 	list_for_each_entry(m, &vmcore_list, list) {
675 		if (start < m->offset + m->size) {
676 			u64 paddr = 0;
677 
678 			tsz = (size_t)min_t(unsigned long long,
679 					    m->offset + m->size - start, size);
680 			paddr = m->paddr + start - m->offset;
681 			if (vmcore_remap_oldmem_pfn(vma, vma->vm_start + len,
682 						    paddr >> PAGE_SHIFT, tsz,
683 						    vma->vm_page_prot))
684 				goto fail;
685 			size -= tsz;
686 			start += tsz;
687 			len += tsz;
688 
689 			if (size == 0)
690 				return 0;
691 		}
692 	}
693 
694 	return 0;
695 fail:
696 	do_munmap(vma->vm_mm, vma->vm_start, len, NULL);
697 	return -EAGAIN;
698 }
699 #else
700 static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
701 {
702 	return -ENOSYS;
703 }
704 #endif
705 
706 static const struct proc_ops vmcore_proc_ops = {
707 	.proc_open	= open_vmcore,
708 	.proc_read	= read_vmcore,
709 	.proc_lseek	= default_llseek,
710 	.proc_mmap	= mmap_vmcore,
711 };
712 
713 static struct vmcore* __init get_new_element(void)
714 {
715 	return kzalloc(sizeof(struct vmcore), GFP_KERNEL);
716 }
717 
718 static u64 get_vmcore_size(size_t elfsz, size_t elfnotesegsz,
719 			   struct list_head *vc_list)
720 {
721 	u64 size;
722 	struct vmcore *m;
723 
724 	size = elfsz + elfnotesegsz;
725 	list_for_each_entry(m, vc_list, list) {
726 		size += m->size;
727 	}
728 	return size;
729 }
730 
731 /**
732  * update_note_header_size_elf64 - update p_memsz member of each PT_NOTE entry
733  *
734  * @ehdr_ptr: ELF header
735  *
736  * This function updates the p_memsz member of each PT_NOTE entry in the
737  * program header table pointed to by @ehdr_ptr to the real size of the
738  * ELF note segment.
739  */
740 static int __init update_note_header_size_elf64(const Elf64_Ehdr *ehdr_ptr)
741 {
742 	int i, rc=0;
743 	Elf64_Phdr *phdr_ptr;
744 	Elf64_Nhdr *nhdr_ptr;
745 
746 	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
747 	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
748 		void *notes_section;
749 		u64 offset, max_sz, sz, real_sz = 0;
750 		if (phdr_ptr->p_type != PT_NOTE)
751 			continue;
752 		max_sz = phdr_ptr->p_memsz;
753 		offset = phdr_ptr->p_offset;
754 		notes_section = kmalloc(max_sz, GFP_KERNEL);
755 		if (!notes_section)
756 			return -ENOMEM;
757 		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
758 		if (rc < 0) {
759 			kfree(notes_section);
760 			return rc;
761 		}
762 		nhdr_ptr = notes_section;
763 		while (nhdr_ptr->n_namesz != 0) {
764 			sz = sizeof(Elf64_Nhdr) +
765 				(((u64)nhdr_ptr->n_namesz + 3) & ~3) +
766 				(((u64)nhdr_ptr->n_descsz + 3) & ~3);
767 			if ((real_sz + sz) > max_sz) {
768 				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
769 					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
770 				break;
771 			}
772 			real_sz += sz;
773 			nhdr_ptr = (Elf64_Nhdr*)((char*)nhdr_ptr + sz);
774 		}
775 		kfree(notes_section);
776 		phdr_ptr->p_memsz = real_sz;
777 		if (real_sz == 0) {
778 			pr_warn("Warning: Zero PT_NOTE entries found\n");
779 		}
780 	}
781 
782 	return 0;
783 }
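/*
 * Worked example (illustrative): a note with n_namesz == 5 ("CORE") and
 * n_descsz == 0x150 contributes sz = sizeof(Elf64_Nhdr) + 8 + 0x150
 * bytes, since both the name and the descriptor are padded up to 4-byte
 * boundaries by the (x + 3) & ~3 rounding above.
 */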
784 
785 /**
786  * get_note_number_and_size_elf64 - get the number of PT_NOTE program
787  * headers and sum of real size of their ELF note segment headers and
788  * data.
789  *
790  * @ehdr_ptr: ELF header
791  * @nr_ptnote: buffer for the number of PT_NOTE program headers
792  * @sz_ptnote: buffer for size of unique PT_NOTE program header
793  *
794  * This function is used to merge multiple PT_NOTE program headers
795  * into a single unique one. The resulting unique entry will have
796  * @sz_ptnote in its phdr->p_memsz.
797  *
798  * It is assumed that program headers with PT_NOTE type pointed to by
799  * @ehdr_ptr have already been updated by update_note_header_size_elf64
800  * and that each PT_NOTE program header has the actual ELF note segment
801  * size in its p_memsz member.
802  */
803 static int __init get_note_number_and_size_elf64(const Elf64_Ehdr *ehdr_ptr,
804 						 int *nr_ptnote, u64 *sz_ptnote)
805 {
806 	int i;
807 	Elf64_Phdr *phdr_ptr;
808 
809 	*nr_ptnote = *sz_ptnote = 0;
810 
811 	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
812 	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
813 		if (phdr_ptr->p_type != PT_NOTE)
814 			continue;
815 		*nr_ptnote += 1;
816 		*sz_ptnote += phdr_ptr->p_memsz;
817 	}
818 
819 	return 0;
820 }
821 
822 /**
823  * copy_notes_elf64 - copy ELF note segments into a given buffer
824  *
825  * @ehdr_ptr: ELF header
826  * @notes_buf: buffer into which ELF note segments are copied
827  *
828  * This function is used to copy the ELF note segments in the 1st kernel
829  * into the buffer @notes_buf in the 2nd kernel. It is assumed that the
830  * size of the buffer @notes_buf is equal to or larger than the sum of
831  * the real ELF note segment headers and data.
832  *
833  * It is assumed that program headers with PT_NOTE type pointed to by
834  * @ehdr_ptr have already been updated by update_note_header_size_elf64
835  * and that each PT_NOTE program header has the actual ELF note segment
836  * size in its p_memsz member.
837  */
838 static int __init copy_notes_elf64(const Elf64_Ehdr *ehdr_ptr, char *notes_buf)
839 {
840 	int i, rc=0;
841 	Elf64_Phdr *phdr_ptr;
842 
843 	phdr_ptr = (Elf64_Phdr*)(ehdr_ptr + 1);
844 
845 	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
846 		u64 offset;
847 		if (phdr_ptr->p_type != PT_NOTE)
848 			continue;
849 		offset = phdr_ptr->p_offset;
850 		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
851 					   &offset);
852 		if (rc < 0)
853 			return rc;
854 		notes_buf += phdr_ptr->p_memsz;
855 	}
856 
857 	return 0;
858 }
859 
860 /* Merges all the PT_NOTE headers into one. */
861 static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz,
862 					   char **notes_buf, size_t *notes_sz)
863 {
864 	int i, nr_ptnote=0, rc=0;
865 	char *tmp;
866 	Elf64_Ehdr *ehdr_ptr;
867 	Elf64_Phdr phdr;
868 	u64 phdr_sz = 0, note_off;
869 
870 	ehdr_ptr = (Elf64_Ehdr *)elfptr;
871 
872 	rc = update_note_header_size_elf64(ehdr_ptr);
873 	if (rc < 0)
874 		return rc;
875 
876 	rc = get_note_number_and_size_elf64(ehdr_ptr, &nr_ptnote, &phdr_sz);
877 	if (rc < 0)
878 		return rc;
879 
880 	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
881 	*notes_buf = vmcore_alloc_buf(*notes_sz);
882 	if (!*notes_buf)
883 		return -ENOMEM;
884 
885 	rc = copy_notes_elf64(ehdr_ptr, *notes_buf);
886 	if (rc < 0)
887 		return rc;
888 
889 	/* Prepare merged PT_NOTE program header. */
890 	phdr.p_type    = PT_NOTE;
891 	phdr.p_flags   = 0;
892 	note_off = sizeof(Elf64_Ehdr) +
893 			(ehdr_ptr->e_phnum - nr_ptnote +1) * sizeof(Elf64_Phdr);
894 	phdr.p_offset  = roundup(note_off, PAGE_SIZE);
895 	phdr.p_vaddr   = phdr.p_paddr = 0;
896 	phdr.p_filesz  = phdr.p_memsz = phdr_sz;
897 	phdr.p_align   = 0;
898 
899 	/* Add merged PT_NOTE program header */
900 	tmp = elfptr + sizeof(Elf64_Ehdr);
901 	memcpy(tmp, &phdr, sizeof(phdr));
902 	tmp += sizeof(phdr);
903 
904 	/* Remove unwanted PT_NOTE program headers. */
905 	i = (nr_ptnote - 1) * sizeof(Elf64_Phdr);
906 	*elfsz = *elfsz - i;
907 	memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf64_Ehdr)-sizeof(Elf64_Phdr)));
908 	memset(elfptr + *elfsz, 0, i);
909 	*elfsz = roundup(*elfsz, PAGE_SIZE);
910 
911 	/* Modify e_phnum to reflect merged headers. */
912 	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;
913 
914 	/* Store the size of all notes.  We need this to update the note
915 	 * header when device dumps are added.
916 	 */
917 	elfnotes_orig_sz = phdr.p_memsz;
918 
919 	return 0;
920 }
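/*
 * Worked example (illustrative): with e_phnum == 5 of which nr_ptnote == 2
 * are PT_NOTE, the merged table has e_phnum == 4: the single merged
 * PT_NOTE followed by the three remaining headers moved up by
 * (nr_ptnote - 1) * sizeof(Elf64_Phdr) bytes; the freed tail is zeroed.
 */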
921 
922 /**
923  * update_note_header_size_elf32 - update p_memsz member of each PT_NOTE entry
924  *
925  * @ehdr_ptr: ELF header
926  *
927  * This function updates the p_memsz member of each PT_NOTE entry in the
928  * program header table pointed to by @ehdr_ptr to the real size of the
929  * ELF note segment.
930  */
931 static int __init update_note_header_size_elf32(const Elf32_Ehdr *ehdr_ptr)
932 {
933 	int i, rc=0;
934 	Elf32_Phdr *phdr_ptr;
935 	Elf32_Nhdr *nhdr_ptr;
936 
937 	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
938 	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
939 		void *notes_section;
940 		u64 offset, max_sz, sz, real_sz = 0;
941 		if (phdr_ptr->p_type != PT_NOTE)
942 			continue;
943 		max_sz = phdr_ptr->p_memsz;
944 		offset = phdr_ptr->p_offset;
945 		notes_section = kmalloc(max_sz, GFP_KERNEL);
946 		if (!notes_section)
947 			return -ENOMEM;
948 		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
949 		if (rc < 0) {
950 			kfree(notes_section);
951 			return rc;
952 		}
953 		nhdr_ptr = notes_section;
954 		while (nhdr_ptr->n_namesz != 0) {
955 			sz = sizeof(Elf32_Nhdr) +
956 				(((u64)nhdr_ptr->n_namesz + 3) & ~3) +
957 				(((u64)nhdr_ptr->n_descsz + 3) & ~3);
958 			if ((real_sz + sz) > max_sz) {
959 				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
960 					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
961 				break;
962 			}
963 			real_sz += sz;
964 			nhdr_ptr = (Elf32_Nhdr*)((char*)nhdr_ptr + sz);
965 		}
966 		kfree(notes_section);
967 		phdr_ptr->p_memsz = real_sz;
968 		if (real_sz == 0) {
969 			pr_warn("Warning: Zero PT_NOTE entries found\n");
970 		}
971 	}
972 
973 	return 0;
974 }
975 
976 /**
977  * get_note_number_and_size_elf32 - get the number of PT_NOTE program
978  * headers and sum of real size of their ELF note segment headers and
979  * data.
980  *
981  * @ehdr_ptr: ELF header
982  * @nr_ptnote: buffer for the number of PT_NOTE program headers
983  * @sz_ptnote: buffer for size of unique PT_NOTE program header
984  *
985  * This function is used to merge multiple PT_NOTE program headers
986  * into a single unique one. The resulting unique entry will have
987  * @sz_ptnote in its phdr->p_memsz.
988  *
989  * It is assumed that program headers with PT_NOTE type pointed to by
990  * @ehdr_ptr have already been updated by update_note_header_size_elf32
991  * and that each PT_NOTE program header has the actual ELF note segment
992  * size in its p_memsz member.
993  */
994 static int __init get_note_number_and_size_elf32(const Elf32_Ehdr *ehdr_ptr,
995 						 int *nr_ptnote, u64 *sz_ptnote)
996 {
997 	int i;
998 	Elf32_Phdr *phdr_ptr;
999 
1000 	*nr_ptnote = *sz_ptnote = 0;
1001 
1002 	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
1003 	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
1004 		if (phdr_ptr->p_type != PT_NOTE)
1005 			continue;
1006 		*nr_ptnote += 1;
1007 		*sz_ptnote += phdr_ptr->p_memsz;
1008 	}
1009 
1010 	return 0;
1011 }
1012 
1013 /**
1014  * copy_notes_elf32 - copy ELF note segments into a given buffer
1015  *
1016  * @ehdr_ptr: ELF header
1017  * @notes_buf: buffer into which ELF note segments are copied
1018  *
1019  * This function is used to copy the ELF note segments in the 1st kernel
1020  * into the buffer @notes_buf in the 2nd kernel. It is assumed that the
1021  * size of the buffer @notes_buf is equal to or larger than the sum of
1022  * the real ELF note segment headers and data.
1023  *
1024  * It is assumed that program headers with PT_NOTE type pointed to by
1025  * @ehdr_ptr have already been updated by update_note_header_size_elf32
1026  * and that each PT_NOTE program header has the actual ELF note segment
1027  * size in its p_memsz member.
1028  */
1029 static int __init copy_notes_elf32(const Elf32_Ehdr *ehdr_ptr, char *notes_buf)
1030 {
1031 	int i, rc=0;
1032 	Elf32_Phdr *phdr_ptr;
1033 
1034 	phdr_ptr = (Elf32_Phdr*)(ehdr_ptr + 1);
1035 
1036 	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
1037 		u64 offset;
1038 		if (phdr_ptr->p_type != PT_NOTE)
1039 			continue;
1040 		offset = phdr_ptr->p_offset;
1041 		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
1042 					   &offset);
1043 		if (rc < 0)
1044 			return rc;
1045 		notes_buf += phdr_ptr->p_memsz;
1046 	}
1047 
1048 	return 0;
1049 }
1050 
1051 /* Merges all the PT_NOTE headers into one. */
1052 static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz,
1053 					   char **notes_buf, size_t *notes_sz)
1054 {
1055 	int i, nr_ptnote=0, rc=0;
1056 	char *tmp;
1057 	Elf32_Ehdr *ehdr_ptr;
1058 	Elf32_Phdr phdr;
1059 	u64 phdr_sz = 0, note_off;
1060 
1061 	ehdr_ptr = (Elf32_Ehdr *)elfptr;
1062 
1063 	rc = update_note_header_size_elf32(ehdr_ptr);
1064 	if (rc < 0)
1065 		return rc;
1066 
1067 	rc = get_note_number_and_size_elf32(ehdr_ptr, &nr_ptnote, &phdr_sz);
1068 	if (rc < 0)
1069 		return rc;
1070 
1071 	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
1072 	*notes_buf = vmcore_alloc_buf(*notes_sz);
1073 	if (!*notes_buf)
1074 		return -ENOMEM;
1075 
1076 	rc = copy_notes_elf32(ehdr_ptr, *notes_buf);
1077 	if (rc < 0)
1078 		return rc;
1079 
1080 	/* Prepare merged PT_NOTE program header. */
1081 	phdr.p_type    = PT_NOTE;
1082 	phdr.p_flags   = 0;
1083 	note_off = sizeof(Elf32_Ehdr) +
1084 			(ehdr_ptr->e_phnum - nr_ptnote +1) * sizeof(Elf32_Phdr);
1085 	phdr.p_offset  = roundup(note_off, PAGE_SIZE);
1086 	phdr.p_vaddr   = phdr.p_paddr = 0;
1087 	phdr.p_filesz  = phdr.p_memsz = phdr_sz;
1088 	phdr.p_align   = 0;
1089 
1090 	/* Add merged PT_NOTE program header */
1091 	tmp = elfptr + sizeof(Elf32_Ehdr);
1092 	memcpy(tmp, &phdr, sizeof(phdr));
1093 	tmp += sizeof(phdr);
1094 
1095 	/* Remove unwanted PT_NOTE program headers. */
1096 	i = (nr_ptnote - 1) * sizeof(Elf32_Phdr);
1097 	*elfsz = *elfsz - i;
1098 	memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf32_Ehdr)-sizeof(Elf32_Phdr)));
1099 	memset(elfptr + *elfsz, 0, i);
1100 	*elfsz = roundup(*elfsz, PAGE_SIZE);
1101 
1102 	/* Modify e_phnum to reflect merged headers. */
1103 	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;
1104 
1105 	/* Store the size of all notes.  We need this to update the note
1106 	 * header when device dumps are added.
1107 	 */
1108 	elfnotes_orig_sz = phdr.p_memsz;
1109 
1110 	return 0;
1111 }
1112 
1113 /* Add memory chunks represented by program headers to the vmcore list. Also
1114  * update the new offset fields of exported program headers. */
1115 static int __init process_ptload_program_headers_elf64(char *elfptr,
1116 						size_t elfsz,
1117 						size_t elfnotes_sz,
1118 						struct list_head *vc_list)
1119 {
1120 	int i;
1121 	Elf64_Ehdr *ehdr_ptr;
1122 	Elf64_Phdr *phdr_ptr;
1123 	loff_t vmcore_off;
1124 	struct vmcore *new;
1125 
1126 	ehdr_ptr = (Elf64_Ehdr *)elfptr;
1127 	phdr_ptr = (Elf64_Phdr*)(elfptr + sizeof(Elf64_Ehdr)); /* PT_NOTE hdr */
1128 
1129 	/* Skip Elf header, program headers and Elf note segment. */
1130 	vmcore_off = elfsz + elfnotes_sz;
1131 
1132 	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
1133 		u64 paddr, start, end, size;
1134 
1135 		if (phdr_ptr->p_type != PT_LOAD)
1136 			continue;
1137 
1138 		paddr = phdr_ptr->p_offset;
1139 		start = rounddown(paddr, PAGE_SIZE);
1140 		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
1141 		size = end - start;
1142 
1143 		/* Add this contiguous chunk of memory to the vmcore list. */
1144 		new = get_new_element();
1145 		if (!new)
1146 			return -ENOMEM;
1147 		new->paddr = start;
1148 		new->size = size;
1149 		list_add_tail(&new->list, vc_list);
1150 
1151 		/* Update the program header offset. */
1152 		phdr_ptr->p_offset = vmcore_off + (paddr - start);
1153 		vmcore_off = vmcore_off + size;
1154 	}
1155 	return 0;
1156 }
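/*
 * Worked example (illustrative): a PT_LOAD entry whose physical address
 * (recorded in p_offset by the crash kernel's ELF header) is 0x1000200
 * is covered by the page-aligned chunk starting at 0x1000000; its new
 * file offset becomes vmcore_off + 0x200, so reads land on the right
 * bytes within the chunk.
 */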
1157 
1158 static int __init process_ptload_program_headers_elf32(char *elfptr,
1159 						size_t elfsz,
1160 						size_t elfnotes_sz,
1161 						struct list_head *vc_list)
1162 {
1163 	int i;
1164 	Elf32_Ehdr *ehdr_ptr;
1165 	Elf32_Phdr *phdr_ptr;
1166 	loff_t vmcore_off;
1167 	struct vmcore *new;
1168 
1169 	ehdr_ptr = (Elf32_Ehdr *)elfptr;
1170 	phdr_ptr = (Elf32_Phdr*)(elfptr + sizeof(Elf32_Ehdr)); /* PT_NOTE hdr */
1171 
1172 	/* Skip Elf header, program headers and Elf note segment. */
1173 	vmcore_off = elfsz + elfnotes_sz;
1174 
1175 	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
1176 		u64 paddr, start, end, size;
1177 
1178 		if (phdr_ptr->p_type != PT_LOAD)
1179 			continue;
1180 
1181 		paddr = phdr_ptr->p_offset;
1182 		start = rounddown(paddr, PAGE_SIZE);
1183 		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
1184 		size = end - start;
1185 
1186 		/* Add this contiguous chunk of memory to the vmcore list. */
1187 		new = get_new_element();
1188 		if (!new)
1189 			return -ENOMEM;
1190 		new->paddr = start;
1191 		new->size = size;
1192 		list_add_tail(&new->list, vc_list);
1193 
1194 		/* Update the program header offset */
1195 		phdr_ptr->p_offset = vmcore_off + (paddr - start);
1196 		vmcore_off = vmcore_off + size;
1197 	}
1198 	return 0;
1199 }
1200 
1201 /* Sets offset fields of vmcore elements. */
1202 static void set_vmcore_list_offsets(size_t elfsz, size_t elfnotes_sz,
1203 				    struct list_head *vc_list)
1204 {
1205 	loff_t vmcore_off;
1206 	struct vmcore *m;
1207 
1208 	/* Skip Elf header, program headers and Elf note segment. */
1209 	vmcore_off = elfsz + elfnotes_sz;
1210 
1211 	list_for_each_entry(m, vc_list, list) {
1212 		m->offset = vmcore_off;
1213 		vmcore_off += m->size;
1214 	}
1215 }
1216 
1217 static void free_elfcorebuf(void)
1218 {
1219 	free_pages((unsigned long)elfcorebuf, get_order(elfcorebuf_sz_orig));
1220 	elfcorebuf = NULL;
1221 	vfree(elfnotes_buf);
1222 	elfnotes_buf = NULL;
1223 }
1224 
1225 static int __init parse_crash_elf64_headers(void)
1226 {
1227 	int rc=0;
1228 	Elf64_Ehdr ehdr;
1229 	u64 addr;
1230 
1231 	addr = elfcorehdr_addr;
1232 
1233 	/* Read Elf header */
1234 	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf64_Ehdr), &addr);
1235 	if (rc < 0)
1236 		return rc;
1237 
1238 	/* Do some basic verification. */
1239 	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
1240 		(ehdr.e_type != ET_CORE) ||
1241 		!vmcore_elf64_check_arch(&ehdr) ||
1242 		ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
1243 		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
1244 		ehdr.e_version != EV_CURRENT ||
1245 		ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
1246 		ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
1247 		ehdr.e_phnum == 0) {
1248 		pr_warn("Warning: Core image elf header is not sane\n");
1249 		return -EINVAL;
1250 	}
1251 
1252 	/* Read in all elf headers. */
1253 	elfcorebuf_sz_orig = sizeof(Elf64_Ehdr) +
1254 				ehdr.e_phnum * sizeof(Elf64_Phdr);
1255 	elfcorebuf_sz = elfcorebuf_sz_orig;
1256 	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
1257 					      get_order(elfcorebuf_sz_orig));
1258 	if (!elfcorebuf)
1259 		return -ENOMEM;
1260 	addr = elfcorehdr_addr;
1261 	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
1262 	if (rc < 0)
1263 		goto fail;
1264 
1265 	/* Merge all PT_NOTE headers into one. */
1266 	rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz,
1267 				      &elfnotes_buf, &elfnotes_sz);
1268 	if (rc)
1269 		goto fail;
1270 	rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
1271 						  elfnotes_sz, &vmcore_list);
1272 	if (rc)
1273 		goto fail;
1274 	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
1275 	return 0;
1276 fail:
1277 	free_elfcorebuf();
1278 	return rc;
1279 }
1280 
1281 static int __init parse_crash_elf32_headers(void)
1282 {
1283 	int rc=0;
1284 	Elf32_Ehdr ehdr;
1285 	u64 addr;
1286 
1287 	addr = elfcorehdr_addr;
1288 
1289 	/* Read Elf header */
1290 	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf32_Ehdr), &addr);
1291 	if (rc < 0)
1292 		return rc;
1293 
1294 	/* Do some basic verification. */
1295 	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
1296 		(ehdr.e_type != ET_CORE) ||
1297 		!vmcore_elf32_check_arch(&ehdr) ||
1298 		ehdr.e_ident[EI_CLASS] != ELFCLASS32||
1299 		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
1300 		ehdr.e_version != EV_CURRENT ||
1301 		ehdr.e_ehsize != sizeof(Elf32_Ehdr) ||
1302 		ehdr.e_phentsize != sizeof(Elf32_Phdr) ||
1303 		ehdr.e_phnum == 0) {
1304 		pr_warn("Warning: Core image elf header is not sane\n");
1305 		return -EINVAL;
1306 	}
1307 
1308 	/* Read in all elf headers. */
1309 	elfcorebuf_sz_orig = sizeof(Elf32_Ehdr) + ehdr.e_phnum * sizeof(Elf32_Phdr);
1310 	elfcorebuf_sz = elfcorebuf_sz_orig;
1311 	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
1312 					      get_order(elfcorebuf_sz_orig));
1313 	if (!elfcorebuf)
1314 		return -ENOMEM;
1315 	addr = elfcorehdr_addr;
1316 	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
1317 	if (rc < 0)
1318 		goto fail;
1319 
1320 	/* Merge all PT_NOTE headers into one. */
1321 	rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz,
1322 				      &elfnotes_buf, &elfnotes_sz);
1323 	if (rc)
1324 		goto fail;
1325 	rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz,
1326 						  elfnotes_sz, &vmcore_list);
1327 	if (rc)
1328 		goto fail;
1329 	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
1330 	return 0;
1331 fail:
1332 	free_elfcorebuf();
1333 	return rc;
1334 }
1335 
1336 static int __init parse_crash_elf_headers(void)
1337 {
1338 	unsigned char e_ident[EI_NIDENT];
1339 	u64 addr;
1340 	int rc=0;
1341 
1342 	addr = elfcorehdr_addr;
1343 	rc = elfcorehdr_read(e_ident, EI_NIDENT, &addr);
1344 	if (rc < 0)
1345 		return rc;
1346 	if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) {
1347 		pr_warn("Warning: Core image elf header not found\n");
1348 		return -EINVAL;
1349 	}
1350 
1351 	if (e_ident[EI_CLASS] == ELFCLASS64) {
1352 		rc = parse_crash_elf64_headers();
1353 		if (rc)
1354 			return rc;
1355 	} else if (e_ident[EI_CLASS] == ELFCLASS32) {
1356 		rc = parse_crash_elf32_headers();
1357 		if (rc)
1358 			return rc;
1359 	} else {
1360 		pr_warn("Warning: Core image elf header is not sane\n");
1361 		return -EINVAL;
1362 	}
1363 
1364 	/* Determine vmcore size. */
1365 	vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
1366 				      &vmcore_list);
1367 
1368 	return 0;
1369 }
1370 
1371 #ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
1372 /**
1373  * vmcoredd_write_header - Write vmcore device dump header at the
1374  * beginning of the dump's buffer.
1375  * @buf: Output buffer where the note is written
1376  * @data: Dump info
1377  * @size: Size of the dump
1378  *
1379  * Fills the beginning of the dump's buffer with the vmcore device dump header.
1380  */
1381 static void vmcoredd_write_header(void *buf, struct vmcoredd_data *data,
1382 				  u32 size)
1383 {
1384 	struct vmcoredd_header *vdd_hdr = (struct vmcoredd_header *)buf;
1385 
1386 	vdd_hdr->n_namesz = sizeof(vdd_hdr->name);
1387 	vdd_hdr->n_descsz = size + sizeof(vdd_hdr->dump_name);
1388 	vdd_hdr->n_type = NT_VMCOREDD;
1389 
1390 	strncpy((char *)vdd_hdr->name, VMCOREDD_NOTE_NAME,
1391 		sizeof(vdd_hdr->name));
1392 	memcpy(vdd_hdr->dump_name, data->dump_name, sizeof(vdd_hdr->dump_name));
1393 }
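/*
 * The resulting buffer (illustrative; field sizes are those of struct
 * vmcoredd_header) is one ELF note:
 *
 *	+--------------------------------------+
 *	| n_namesz | n_descsz | n_type         |  note header (NT_VMCOREDD)
 *	| name (VMCOREDD_NOTE_NAME, "LINUX")   |
 *	| dump_name (from @data)               |--+
 *	+--------------------------------------+  |- n_descsz bytes
 *	| raw device dump data (@size bytes)   |--+
 *	+--------------------------------------+
 */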
1394 
1395 /**
1396  * vmcoredd_update_program_headers - Update all Elf program headers
1397  * @elfptr: Pointer to elf header
1398  * @elfnotesz: Size of elf notes aligned to page size
1399  * @vmcoreddsz: Size of device dumps to be added to elf note header
1400  *
1401  * Determine the type of Elf header (Elf64 or Elf32) and update the elf note size.
1402  * Also update the offsets of all the program headers after the elf note header.
1403  */
1404 static void vmcoredd_update_program_headers(char *elfptr, size_t elfnotesz,
1405 					    size_t vmcoreddsz)
1406 {
1407 	unsigned char *e_ident = (unsigned char *)elfptr;
1408 	u64 start, end, size;
1409 	loff_t vmcore_off;
1410 	u32 i;
1411 
1412 	vmcore_off = elfcorebuf_sz + elfnotesz;
1413 
1414 	if (e_ident[EI_CLASS] == ELFCLASS64) {
1415 		Elf64_Ehdr *ehdr = (Elf64_Ehdr *)elfptr;
1416 		Elf64_Phdr *phdr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr));
1417 
1418 		/* Update all program headers */
1419 		for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
1420 			if (phdr->p_type == PT_NOTE) {
1421 				/* Update note size */
1422 				phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz;
1423 				phdr->p_filesz = phdr->p_memsz;
1424 				continue;
1425 			}
1426 
1427 			start = rounddown(phdr->p_offset, PAGE_SIZE);
1428 			end = roundup(phdr->p_offset + phdr->p_memsz,
1429 				      PAGE_SIZE);
1430 			size = end - start;
1431 			phdr->p_offset = vmcore_off + (phdr->p_offset - start);
1432 			vmcore_off += size;
1433 		}
1434 	} else {
1435 		Elf32_Ehdr *ehdr = (Elf32_Ehdr *)elfptr;
1436 		Elf32_Phdr *phdr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr));
1437 
1438 		/* Update all program headers */
1439 		for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
1440 			if (phdr->p_type == PT_NOTE) {
1441 				/* Update note size */
1442 				phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz;
1443 				phdr->p_filesz = phdr->p_memsz;
1444 				continue;
1445 			}
1446 
1447 			start = rounddown(phdr->p_offset, PAGE_SIZE);
1448 			end = roundup(phdr->p_offset + phdr->p_memsz,
1449 				      PAGE_SIZE);
1450 			size = end - start;
1451 			phdr->p_offset = vmcore_off + (phdr->p_offset - start);
1452 			vmcore_off += size;
1453 		}
1454 	}
1455 }
1456 
1457 /**
1458  * vmcoredd_update_size - Update the total size of the device dumps and
1459  * update the Elf header
1460  * @dump_size: Size of the current device dump to be added to total size
1461  *
1462  * Update the total size of all the device dumps and update the Elf program
1463  * headers. Calculate the new offsets for the vmcore list and update the
1464  * total vmcore size.
1465  */
1466 static void vmcoredd_update_size(size_t dump_size)
1467 {
1468 	vmcoredd_orig_sz += dump_size;
1469 	elfnotes_sz = roundup(elfnotes_orig_sz, PAGE_SIZE) + vmcoredd_orig_sz;
1470 	vmcoredd_update_program_headers(elfcorebuf, elfnotes_sz,
1471 					vmcoredd_orig_sz);
1472 
1473 	/* Update vmcore list offsets */
1474 	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
1475 
1476 	vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
1477 				      &vmcore_list);
1478 	proc_vmcore->size = vmcore_size;
1479 }
1480 
1481 /**
1482  * vmcore_add_device_dump - Add a buffer containing device dump to vmcore
1483  * @data: dump info.
1484  *
1485  * Allocate a buffer and invoke the calling driver's dump collect routine.
1486  * Write Elf note at the beginning of the buffer to indicate vmcore device
1487  * dump and add the dump to global list.
1488  */
1489 int vmcore_add_device_dump(struct vmcoredd_data *data)
1490 {
1491 	struct vmcoredd_node *dump;
1492 	void *buf = NULL;
1493 	size_t data_size;
1494 	int ret;
1495 
1496 	if (vmcoredd_disabled) {
1497 		pr_err_once("Device dump is disabled\n");
1498 		return -EINVAL;
1499 	}
1500 
1501 	if (!data || !strlen(data->dump_name) ||
1502 	    !data->vmcoredd_callback || !data->size)
1503 		return -EINVAL;
1504 
1505 	dump = vzalloc(sizeof(*dump));
1506 	if (!dump) {
1507 		ret = -ENOMEM;
1508 		goto out_err;
1509 	}
1510 
1511 	/* Keep the size of the buffer page-aligned so that it can be mmapped */
1512 	data_size = roundup(sizeof(struct vmcoredd_header) + data->size,
1513 			    PAGE_SIZE);
1514 
1515 	/* Allocate a buffer for drivers to write their dumps into */
1516 	buf = vmcore_alloc_buf(data_size);
1517 	if (!buf) {
1518 		ret = -ENOMEM;
1519 		goto out_err;
1520 	}
1521 
1522 	vmcoredd_write_header(buf, data, data_size -
1523 			      sizeof(struct vmcoredd_header));
1524 
1525 	/* Invoke the driver's dump collection routine */
1526 	ret = data->vmcoredd_callback(data, buf +
1527 				      sizeof(struct vmcoredd_header));
1528 	if (ret)
1529 		goto out_err;
1530 
1531 	dump->buf = buf;
1532 	dump->size = data_size;
1533 
1534 	/* Add the dump to the device dump list */
1535 	mutex_lock(&vmcoredd_mutex);
1536 	list_add_tail(&dump->list, &vmcoredd_list);
1537 	mutex_unlock(&vmcoredd_mutex);
1538 
1539 	vmcoredd_update_size(data_size);
1540 	return 0;
1541 
1542 out_err:
1543 	vfree(buf);
1544 	vfree(dump);
1545 
1546 	return ret;
1547 }
1548 EXPORT_SYMBOL(vmcore_add_device_dump);
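/*
 * Example caller (an illustrative sketch; the "mydrv" names are
 * hypothetical):
 *
 *	static int mydrv_collect(struct vmcoredd_data *data, void *buf)
 *	{
 *		// fill buf with up to data->size bytes of device state
 *		return 0;
 *	}
 *
 *	static struct vmcoredd_data mydrv_dump = {
 *		.dump_name = "mydrv",
 *		.size = MYDRV_DUMP_SIZE,	// hypothetical constant
 *		.vmcoredd_callback = mydrv_collect,
 *	};
 *
 *	vmcore_add_device_dump(&mydrv_dump);	// e.g. from a crash-kernel probe path
 */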
1549 #endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
1550 
1551 /* Free all dumps in vmcore device dump list */
1552 static void vmcore_free_device_dumps(void)
1553 {
1554 #ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
1555 	mutex_lock(&vmcoredd_mutex);
1556 	while (!list_empty(&vmcoredd_list)) {
1557 		struct vmcoredd_node *dump;
1558 
1559 		dump = list_first_entry(&vmcoredd_list, struct vmcoredd_node,
1560 					list);
1561 		list_del(&dump->list);
1562 		vfree(dump->buf);
1563 		vfree(dump);
1564 	}
1565 	mutex_unlock(&vmcoredd_mutex);
1566 #endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
1567 }
1568 
1569 /* Init function for vmcore module. */
1570 static int __init vmcore_init(void)
1571 {
1572 	int rc = 0;
1573 
1574 	/* Allow architectures to allocate ELF header in 2nd kernel */
1575 	rc = elfcorehdr_alloc(&elfcorehdr_addr, &elfcorehdr_size);
1576 	if (rc)
1577 		return rc;
1578 	/*
1579 	 * If elfcorehdr= has been passed on the cmdline or the ELF header was
1580 	 * created in the 2nd kernel, then capture the dump.
1581 	 */
1582 	if (!(is_vmcore_usable()))
1583 		return rc;
1584 	rc = parse_crash_elf_headers();
1585 	if (rc) {
1586 		pr_warn("Kdump: vmcore not initialized\n");
1587 		return rc;
1588 	}
1589 	elfcorehdr_free(elfcorehdr_addr);
1590 	elfcorehdr_addr = ELFCORE_ADDR_ERR;
1591 
1592 	proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &vmcore_proc_ops);
1593 	if (proc_vmcore)
1594 		proc_vmcore->size = vmcore_size;
1595 	return 0;
1596 }
1597 fs_initcall(vmcore_init);
1598 
1599 /* Cleanup function for vmcore module. */
1600 void vmcore_cleanup(void)
1601 {
1602 	if (proc_vmcore) {
1603 		proc_remove(proc_vmcore);
1604 		proc_vmcore = NULL;
1605 	}
1606 
1607 	/* clear the vmcore list. */
1608 	while (!list_empty(&vmcore_list)) {
1609 		struct vmcore *m;
1610 
1611 		m = list_first_entry(&vmcore_list, struct vmcore, list);
1612 		list_del(&m->list);
1613 		kfree(m);
1614 	}
1615 	free_elfcorebuf();
1616 
1617 	/* clear vmcore device dump list */
1618 	vmcore_free_device_dumps();
1619 }
1620