// SPDX-License-Identifier: GPL-2.0-only
/*
 *	fs/proc/vmcore.c - Interface for accessing the crash
 *	dump from the system's previous life.
 *	Heavily borrowed from fs/proc/kcore.c
 *	Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *	Copyright (C) IBM Corporation, 2004. All rights reserved
 *
 */

#include <linux/mm.h>
#include <linux/kcore.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/printk.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/crash_dump.h>
#include <linux/list.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/uaccess.h>
#include <linux/cc_platform.h>
#include <asm/io.h>
#include "internal.h"

/* List representing chunks of contiguous memory areas and their offsets in
 * the vmcore file.
 */
static LIST_HEAD(vmcore_list);

/* Stores the pointer to the buffer containing the kernel ELF core headers. */
static char *elfcorebuf;
static size_t elfcorebuf_sz;
static size_t elfcorebuf_sz_orig;

static char *elfnotes_buf;
static size_t elfnotes_sz;
/* Size of all notes minus the device dump notes */
static size_t elfnotes_orig_sz;

/* Total size of the vmcore file. */
static u64 vmcore_size;

static struct proc_dir_entry *proc_vmcore;

#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
/* Device dump list and a mutex to synchronize access to it */
static LIST_HEAD(vmcoredd_list);
static DEFINE_MUTEX(vmcoredd_mutex);

static bool vmcoredd_disabled;
core_param(novmcoredd, vmcoredd_disabled, bool, 0);
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

/* Total size of all device dumps */
static size_t vmcoredd_orig_sz;

static DECLARE_RWSEM(vmcore_cb_rwsem);
/* List of registered vmcore callbacks. */
static LIST_HEAD(vmcore_cb_list);
/* Whether we had a surprise unregistration of a callback. */
static bool vmcore_cb_unstable;
/* Whether the vmcore has been opened once. */
static bool vmcore_opened;

void register_vmcore_cb(struct vmcore_cb *cb)
{
	down_write(&vmcore_cb_rwsem);
	INIT_LIST_HEAD(&cb->next);
	list_add_tail(&cb->next, &vmcore_cb_list);
	/*
	 * Registering a vmcore callback after the vmcore was opened is
	 * very unusual (e.g., manual driver loading).
	 */
	if (vmcore_opened)
		pr_warn_once("Unexpected vmcore callback registration\n");
	up_write(&vmcore_cb_rwsem);
}
EXPORT_SYMBOL_GPL(register_vmcore_cb);

void unregister_vmcore_cb(struct vmcore_cb *cb)
{
	down_write(&vmcore_cb_rwsem);
	list_del(&cb->next);
	/*
	 * Unregistering a vmcore callback after the vmcore was opened is
	 * very unusual (e.g., forced driver removal), but we cannot stop
	 * unregistering.
	 */
	if (vmcore_opened) {
		pr_warn_once("Unexpected vmcore callback unregistration\n");
		vmcore_cb_unstable = true;
	}
	up_write(&vmcore_cb_rwsem);
}
EXPORT_SYMBOL_GPL(unregister_vmcore_cb);
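
/*
 * Example: how a driver might use the callback interface above (an
 * illustrative sketch, not part of this file; the mydrv_* names are
 * hypothetical). virtio_mem uses this scheme so that inflated ranges
 * read back as zeroes instead of being touched:
 *
 *	static bool mydrv_pfn_is_ram(struct vmcore_cb *cb, unsigned long pfn)
 *	{
 *		return !mydrv_pfn_is_ballooned(pfn);	// hypothetical helper
 *	}
 *
 *	static struct vmcore_cb mydrv_vmcore_cb = {
 *		.pfn_is_ram = mydrv_pfn_is_ram,
 *	};
 *
 *	register_vmcore_cb(&mydrv_vmcore_cb);	// on probe, in the kdump kernel
 *	unregister_vmcore_cb(&mydrv_vmcore_cb);	// on remove
 */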

static bool pfn_is_ram(unsigned long pfn)
{
	struct vmcore_cb *cb;
	bool ret = true;

	lockdep_assert_held_read(&vmcore_cb_rwsem);
	if (unlikely(vmcore_cb_unstable))
		return false;

	list_for_each_entry(cb, &vmcore_cb_list, next) {
		if (unlikely(!cb->pfn_is_ram))
			continue;
		ret = cb->pfn_is_ram(cb, pfn);
		if (!ret)
			break;
	}

	return ret;
}

static int open_vmcore(struct inode *inode, struct file *file)
{
	down_read(&vmcore_cb_rwsem);
	vmcore_opened = true;
	up_read(&vmcore_cb_rwsem);

	return 0;
}

/* Reads from the oldmem device, a page at a time, from the given offset. */
ssize_t read_from_oldmem(char *buf, size_t count,
			 u64 *ppos, int userbuf,
			 bool encrypted)
{
	unsigned long pfn, offset;
	size_t nr_bytes;
	ssize_t read = 0, tmp;

	if (!count)
		return 0;

	offset = (unsigned long)(*ppos % PAGE_SIZE);
	pfn = (unsigned long)(*ppos / PAGE_SIZE);

	down_read(&vmcore_cb_rwsem);
	do {
		if (count > (PAGE_SIZE - offset))
			nr_bytes = PAGE_SIZE - offset;
		else
			nr_bytes = count;

		/* If pfn is not ram, return zeros for sparse dump files */
		if (!pfn_is_ram(pfn)) {
			tmp = 0;
			if (!userbuf)
				memset(buf, 0, nr_bytes);
			else if (clear_user(buf, nr_bytes))
				tmp = -EFAULT;
		} else {
			if (encrypted)
				tmp = copy_oldmem_page_encrypted(pfn, buf,
								 nr_bytes,
								 offset,
								 userbuf);
			else
				tmp = copy_oldmem_page(pfn, buf, nr_bytes,
						       offset, userbuf);
		}
		if (tmp < 0) {
			up_read(&vmcore_cb_rwsem);
			return tmp;
		}

		*ppos += nr_bytes;
		count -= nr_bytes;
		buf += nr_bytes;
		read += nr_bytes;
		++pfn;
		offset = 0;
	} while (count);

	up_read(&vmcore_cb_rwsem);
	return read;
}
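
/*
 * Worked example for the chunking above (illustrative numbers, assuming
 * PAGE_SIZE == 4096): a read of count = 5000 bytes at *ppos = 0x1ff0 starts
 * at pfn = 1, offset = 0xff0. The first iteration copies the 16 bytes left
 * in that page, the next copies a full 4096-byte page at offset 0, and the
 * last copies the remaining 888 bytes. Any pfn reported as not RAM
 * contributes zeroes instead.
 */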

/*
 * Architectures may override this function to allocate the ELF header
 * in the 2nd kernel.
 */
int __weak elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
{
	return 0;
}

/*
 * Architectures may override this function to free the ELF header.
 */
void __weak elfcorehdr_free(unsigned long long addr)
{}

/*
 * Architectures may override this function to read from the ELF header.
 */
ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
{
	return read_from_oldmem(buf, count, ppos, 0, false);
}

/*
 * Architectures may override this function to read from the notes sections.
 */
ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
{
	return read_from_oldmem(buf, count, ppos, 0, cc_platform_has(CC_ATTR_MEM_ENCRYPT));
}

/*
 * Architectures may override this function to map oldmem.
 */
int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
				  unsigned long from, unsigned long pfn,
				  unsigned long size, pgprot_t prot)
{
	prot = pgprot_encrypted(prot);
	return remap_pfn_range(vma, from, pfn, size, prot);
}

/*
 * Architectures which support memory encryption override this.
 */
ssize_t __weak
copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize,
			   unsigned long offset, int userbuf)
{
	return copy_oldmem_page(pfn, buf, csize, offset, userbuf);
}

/*
 * Copy to either kernel or user space.
 */
static int copy_to(void *target, void *src, size_t size, int userbuf)
{
	if (userbuf) {
		if (copy_to_user((char __user *) target, src, size))
			return -EFAULT;
	} else {
		memcpy(target, src, size);
	}
	return 0;
}

#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
static int vmcoredd_copy_dumps(void *dst, u64 start, size_t size, int userbuf)
{
	struct vmcoredd_node *dump;
	u64 offset = 0;
	int ret = 0;
	size_t tsz;
	char *buf;

	mutex_lock(&vmcoredd_mutex);
	list_for_each_entry(dump, &vmcoredd_list, list) {
		if (start < offset + dump->size) {
			tsz = min(offset + (u64)dump->size - start, (u64)size);
			buf = dump->buf + start - offset;
			if (copy_to(dst, buf, tsz, userbuf)) {
				ret = -EFAULT;
				goto out_unlock;
			}

			size -= tsz;
			start += tsz;
			dst += tsz;

			/* Leave now if buffer filled already */
			if (!size)
				goto out_unlock;
		}
		offset += dump->size;
	}

out_unlock:
	mutex_unlock(&vmcoredd_mutex);
	return ret;
}

#ifdef CONFIG_MMU
static int vmcoredd_mmap_dumps(struct vm_area_struct *vma, unsigned long dst,
			       u64 start, size_t size)
{
	struct vmcoredd_node *dump;
	u64 offset = 0;
	int ret = 0;
	size_t tsz;
	char *buf;

	mutex_lock(&vmcoredd_mutex);
	list_for_each_entry(dump, &vmcoredd_list, list) {
		if (start < offset + dump->size) {
			tsz = min(offset + (u64)dump->size - start, (u64)size);
			buf = dump->buf + start - offset;
			if (remap_vmalloc_range_partial(vma, dst, buf, 0,
							tsz)) {
				ret = -EFAULT;
				goto out_unlock;
			}

			size -= tsz;
			start += tsz;
			dst += tsz;

			/* Leave now if buffer filled already */
			if (!size)
				goto out_unlock;
		}
		offset += dump->size;
	}

out_unlock:
	mutex_unlock(&vmcoredd_mutex);
	return ret;
}
#endif /* CONFIG_MMU */
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

/* Read from the ELF header and then the crash dump. On error, a negative
 * value is returned; otherwise, the number of bytes read is returned.
 */
static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
			     int userbuf)
{
	ssize_t acc = 0, tmp;
	size_t tsz;
	u64 start;
	struct vmcore *m = NULL;

	if (buflen == 0 || *fpos >= vmcore_size)
		return 0;

	/* trim buflen to not go beyond EOF */
	if (buflen > vmcore_size - *fpos)
		buflen = vmcore_size - *fpos;

	/* Read ELF core header */
	if (*fpos < elfcorebuf_sz) {
		tsz = min(elfcorebuf_sz - (size_t)*fpos, buflen);
		if (copy_to(buffer, elfcorebuf + *fpos, tsz, userbuf))
			return -EFAULT;
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (buflen == 0)
			return acc;
	}

	/* Read ELF note segment */
	if (*fpos < elfcorebuf_sz + elfnotes_sz) {
		void *kaddr;

		/* We add device dumps before the other ELF notes because the
		 * other ELF notes may not fill the ELF notes buffer
		 * completely and we will end up with zero-filled data
		 * between the ELF notes and the device dumps. Tools will
		 * then try to decode this zero-filled data as valid notes
		 * and we don't want that. Hence, adding device dumps before
		 * the other ELF notes ensures that zero-filled data can be
		 * avoided.
		 */
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
		/* Read device dumps */
		if (*fpos < elfcorebuf_sz + vmcoredd_orig_sz) {
			tsz = min(elfcorebuf_sz + vmcoredd_orig_sz -
				  (size_t)*fpos, buflen);
			start = *fpos - elfcorebuf_sz;
			if (vmcoredd_copy_dumps(buffer, start, tsz, userbuf))
				return -EFAULT;

			buflen -= tsz;
			*fpos += tsz;
			buffer += tsz;
			acc += tsz;

			/* leave now if filled buffer already */
			if (!buflen)
				return acc;
		}
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

		/* Read remaining ELF notes */
		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)*fpos, buflen);
		kaddr = elfnotes_buf + *fpos - elfcorebuf_sz - vmcoredd_orig_sz;
		if (copy_to(buffer, kaddr, tsz, userbuf))
			return -EFAULT;

		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (buflen == 0)
			return acc;
	}

	list_for_each_entry(m, &vmcore_list, list) {
		if (*fpos < m->offset + m->size) {
			tsz = (size_t)min_t(unsigned long long,
					    m->offset + m->size - *fpos,
					    buflen);
			start = m->paddr + *fpos - m->offset;
			tmp = read_from_oldmem(buffer, tsz, &start,
					       userbuf, cc_platform_has(CC_ATTR_MEM_ENCRYPT));
			if (tmp < 0)
				return tmp;
			buflen -= tsz;
			*fpos += tsz;
			buffer += tsz;
			acc += tsz;

			/* leave now if filled buffer already */
			if (buflen == 0)
				return acc;
		}
	}

	return acc;
}

static ssize_t read_vmcore(struct file *file, char __user *buffer,
			   size_t buflen, loff_t *fpos)
{
	return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
}

/*
 * The vmcore fault handler uses the page cache and fills data using the
 * standard __read_vmcore() function.
 *
 * On s390 the fault handler is used for memory regions that can't be mapped
 * directly with remap_pfn_range().
 */
static vm_fault_t mmap_vmcore_fault(struct vm_fault *vmf)
{
#ifdef CONFIG_S390
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	pgoff_t index = vmf->pgoff;
	struct page *page;
	loff_t offset;
	char *buf;
	int rc;

	page = find_or_create_page(mapping, index, GFP_KERNEL);
	if (!page)
		return VM_FAULT_OOM;
	if (!PageUptodate(page)) {
		offset = (loff_t) index << PAGE_SHIFT;
		buf = __va((page_to_pfn(page) << PAGE_SHIFT));
		rc = __read_vmcore(buf, PAGE_SIZE, &offset, 0);
		if (rc < 0) {
			unlock_page(page);
			put_page(page);
			return vmf_error(rc);
		}
		SetPageUptodate(page);
	}
	unlock_page(page);
	vmf->page = page;
	return 0;
#else
	return VM_FAULT_SIGBUS;
#endif
}

static const struct vm_operations_struct vmcore_mmap_ops = {
	.fault = mmap_vmcore_fault,
};

/**
 * vmcore_alloc_buf - allocate buffer in vmalloc memory
 * @size: size of buffer
 *
 * If CONFIG_MMU is defined, use vmalloc_user() to allow users to mmap
 * the buffer to user-space by means of remap_vmalloc_range().
 *
 * If CONFIG_MMU is not defined, use vzalloc() since mmap_vmcore() is
 * disabled and there's no need to allow users to mmap the buffer.
 */
static inline char *vmcore_alloc_buf(size_t size)
{
#ifdef CONFIG_MMU
	return vmalloc_user(size);
#else
	return vzalloc(size);
#endif
}

/*
 * Disable mmap_vmcore() if CONFIG_MMU is not defined. MMU is
 * essential for mmap_vmcore() in order to map physically
 * non-contiguous objects (ELF header, ELF note segment and memory
 * regions in the 1st kernel pointed to by PT_LOAD entries) into
 * virtually contiguous user-space in ELF layout.
 */
#ifdef CONFIG_MMU
/*
 * remap_oldmem_pfn_checked - do remap_oldmem_pfn_range(), replacing all pages
 * reported as not being RAM with the zero page.
 *
 * @vma: vm_area_struct describing requested mapping
 * @from: start remapping from
 * @pfn: page frame number to start remapping to
 * @size: remapping size
 * @prot: protection bits
 *
 * Returns zero on success, -EAGAIN on failure.
 */
static int remap_oldmem_pfn_checked(struct vm_area_struct *vma,
				    unsigned long from, unsigned long pfn,
				    unsigned long size, pgprot_t prot)
{
	unsigned long map_size;
	unsigned long pos_start, pos_end, pos;
	unsigned long zeropage_pfn = my_zero_pfn(0);
	size_t len = 0;

	pos_start = pfn;
	pos_end = pfn + (size >> PAGE_SHIFT);

	for (pos = pos_start; pos < pos_end; ++pos) {
		if (!pfn_is_ram(pos)) {
			/*
			 * We hit a page which is not ram. Remap the continuous
			 * region between pos_start and pos-1 and replace
			 * the non-ram page at pos with the zero page.
			 */
			if (pos > pos_start) {
				/* Remap continuous region */
				map_size = (pos - pos_start) << PAGE_SHIFT;
				if (remap_oldmem_pfn_range(vma, from + len,
							   pos_start, map_size,
							   prot))
					goto fail;
				len += map_size;
			}
			/* Remap the zero page */
			if (remap_oldmem_pfn_range(vma, from + len,
						   zeropage_pfn,
						   PAGE_SIZE, prot))
				goto fail;
			len += PAGE_SIZE;
			pos_start = pos + 1;
		}
	}
	if (pos > pos_start) {
		/* Remap the rest */
		map_size = (pos - pos_start) << PAGE_SHIFT;
		if (remap_oldmem_pfn_range(vma, from + len, pos_start,
					   map_size, prot))
			goto fail;
	}
	return 0;
fail:
	do_munmap(vma->vm_mm, from, len, NULL);
	return -EAGAIN;
}
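
/*
 * Illustrative example for the loop above (hypothetical numbers): for a
 * request covering pfns 10..15 where only pfn 12 is reported as not RAM,
 * three mappings are emitted: pfns 10-11, the shared zero page in place of
 * pfn 12, and pfns 13-15. Userspace thus reads zeroes at the corresponding
 * offset instead of a page that must not be touched.
 */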

static int vmcore_remap_oldmem_pfn(struct vm_area_struct *vma,
			    unsigned long from, unsigned long pfn,
			    unsigned long size, pgprot_t prot)
{
	int ret;

	/*
	 * Check if a pfn_is_ram callback was registered, to avoid
	 * looping over all pages without a reason.
	 */
	down_read(&vmcore_cb_rwsem);
	if (!list_empty(&vmcore_cb_list) || vmcore_cb_unstable)
		ret = remap_oldmem_pfn_checked(vma, from, pfn, size, prot);
	else
		ret = remap_oldmem_pfn_range(vma, from, pfn, size, prot);
	up_read(&vmcore_cb_rwsem);
	return ret;
}

static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;
	u64 start, end, len, tsz;
	struct vmcore *m;

	start = (u64)vma->vm_pgoff << PAGE_SHIFT;
	end = start + size;

	if (size > vmcore_size || end > vmcore_size)
		return -EINVAL;

	if (vma->vm_flags & (VM_WRITE | VM_EXEC))
		return -EPERM;

	vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC);
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_ops = &vmcore_mmap_ops;

	len = 0;

	if (start < elfcorebuf_sz) {
		u64 pfn;

		tsz = min(elfcorebuf_sz - (size_t)start, size);
		pfn = __pa(elfcorebuf + start) >> PAGE_SHIFT;
		if (remap_pfn_range(vma, vma->vm_start, pfn, tsz,
				    vma->vm_page_prot))
			return -EAGAIN;
		size -= tsz;
		start += tsz;
		len += tsz;

		if (size == 0)
			return 0;
	}

	if (start < elfcorebuf_sz + elfnotes_sz) {
		void *kaddr;

		/* We add device dumps before the other ELF notes because the
		 * other ELF notes may not fill the ELF notes buffer
		 * completely and we will end up with zero-filled data
		 * between the ELF notes and the device dumps. Tools will
		 * then try to decode this zero-filled data as valid notes
		 * and we don't want that. Hence, adding device dumps before
		 * the other ELF notes ensures that zero-filled data can be
		 * avoided. This also ensures that the device dumps and
		 * other ELF notes can be properly mmapped at page-aligned
		 * addresses.
		 */
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
		/* Map device dumps */
		if (start < elfcorebuf_sz + vmcoredd_orig_sz) {
			u64 start_off;

			tsz = min(elfcorebuf_sz + vmcoredd_orig_sz -
				  (size_t)start, size);
			start_off = start - elfcorebuf_sz;
			if (vmcoredd_mmap_dumps(vma, vma->vm_start + len,
						start_off, tsz))
				goto fail;

			size -= tsz;
			start += tsz;
			len += tsz;

			/* leave now if filled buffer already */
			if (!size)
				return 0;
		}
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

		/* Map remaining ELF notes */
		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)start, size);
		kaddr = elfnotes_buf + start - elfcorebuf_sz - vmcoredd_orig_sz;
		if (remap_vmalloc_range_partial(vma, vma->vm_start + len,
						kaddr, 0, tsz))
			goto fail;

		size -= tsz;
		start += tsz;
		len += tsz;

		if (size == 0)
			return 0;
	}

	list_for_each_entry(m, &vmcore_list, list) {
		if (start < m->offset + m->size) {
			u64 paddr = 0;

			tsz = (size_t)min_t(unsigned long long,
					    m->offset + m->size - start, size);
			paddr = m->paddr + start - m->offset;
			if (vmcore_remap_oldmem_pfn(vma, vma->vm_start + len,
						    paddr >> PAGE_SHIFT, tsz,
						    vma->vm_page_prot))
				goto fail;
			size -= tsz;
			start += tsz;
			len += tsz;

			if (size == 0)
				return 0;
		}
	}

	return 0;
fail:
	do_munmap(vma->vm_mm, vma->vm_start, len, NULL);
	return -EAGAIN;
}
#else
static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
	return -ENOSYS;
}
#endif

static const struct proc_ops vmcore_proc_ops = {
	.proc_open	= open_vmcore,
	.proc_read	= read_vmcore,
	.proc_lseek	= default_llseek,
	.proc_mmap	= mmap_vmcore,
};
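
/*
 * Userspace view (an illustrative sketch, not part of this file): in the
 * kdump kernel, /proc/vmcore is a complete ELF core file, so existing tools
 * such as makedumpfile or plain cp can consume it. A minimal reader:
 *
 *	int fd = open("/proc/vmcore", O_RDONLY);
 *	Elf64_Ehdr ehdr;
 *	read(fd, &ehdr, sizeof(ehdr));	// ELF header, then program headers
 *	void *p = mmap(NULL, length, PROT_READ, MAP_PRIVATE, fd, 0);
 *	// 'length' is hypothetical; mappings beyond the file size fail with
 *	// -EINVAL, and writable or executable mappings fail with -EPERM.
 */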

static struct vmcore* __init get_new_element(void)
{
	return kzalloc(sizeof(struct vmcore), GFP_KERNEL);
}

static u64 get_vmcore_size(size_t elfsz, size_t elfnotesegsz,
			   struct list_head *vc_list)
{
	u64 size;
	struct vmcore *m;

	size = elfsz + elfnotesegsz;
	list_for_each_entry(m, vc_list, list) {
		size += m->size;
	}
	return size;
}

/**
 * update_note_header_size_elf64 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates the p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to the real size of the
 * ELF note segment.
 */
static int __init update_note_header_size_elf64(const Elf64_Ehdr *ehdr_ptr)
{
	int i, rc = 0;
	Elf64_Phdr *phdr_ptr;
	Elf64_Nhdr *nhdr_ptr;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		void *notes_section;
		u64 offset, max_sz, sz, real_sz = 0;

		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		while (nhdr_ptr->n_namesz != 0) {
			sz = sizeof(Elf64_Nhdr) +
				(((u64)nhdr_ptr->n_namesz + 3) & ~3) +
				(((u64)nhdr_ptr->n_descsz + 3) & ~3);
			if ((real_sz + sz) > max_sz) {
				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
				break;
			}
			real_sz += sz;
			nhdr_ptr = (Elf64_Nhdr *)((char *)nhdr_ptr + sz);
		}
		kfree(notes_section);
		phdr_ptr->p_memsz = real_sz;
		if (real_sz == 0)
			pr_warn("Warning: Zero PT_NOTE entries found\n");
	}

	return 0;
}
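
/*
 * Note entry arithmetic above, with concrete numbers (illustrative): a
 * typical NT_PRSTATUS note on x86-64 has n_namesz = 5 ("CORE\0") and
 * n_descsz = 336, so sz = 12 (Elf64_Nhdr) + 8 (name, 4-byte aligned) +
 * 336 (desc) = 356 bytes. The scan stops at the first entry whose
 * n_namesz is 0, i.e. at the zero-filled tail of the note segment.
 */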

/**
 * get_note_number_and_size_elf64 - get the number of PT_NOTE program
 * headers and the sum of the real sizes of their ELF note segment
 * headers and data.
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for the size of the unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a unique single one. The resulting unique entry will have
 * @sz_ptnote in its phdr->p_memsz.
 *
 * It is assumed that the PT_NOTE program headers pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf64
 * and that each PT_NOTE program header has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init get_note_number_and_size_elf64(const Elf64_Ehdr *ehdr_ptr,
						 int *nr_ptnote, u64 *sz_ptnote)
{
	int i;
	Elf64_Phdr *phdr_ptr;

	*nr_ptnote = *sz_ptnote = 0;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		*nr_ptnote += 1;
		*sz_ptnote += phdr_ptr->p_memsz;
	}

	return 0;
}

/**
 * copy_notes_elf64 - copy ELF note segments into a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function is used to copy the ELF note segments of the 1st kernel
 * into the buffer @notes_buf in the 2nd kernel. It is assumed that the
 * size of the buffer @notes_buf is equal to or larger than the sum of
 * the real ELF note segment headers and data.
 *
 * It is assumed that the PT_NOTE program headers pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf64
 * and that each PT_NOTE program header has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init copy_notes_elf64(const Elf64_Ehdr *ehdr_ptr, char *notes_buf)
{
	int i, rc = 0;
	Elf64_Phdr *phdr_ptr;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 offset;

		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		offset = phdr_ptr->p_offset;
		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
					   &offset);
		if (rc < 0)
			return rc;
		notes_buf += phdr_ptr->p_memsz;
	}

	return 0;
}

/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz,
					   char **notes_buf, size_t *notes_sz)
{
	int i, nr_ptnote = 0, rc = 0;
	char *tmp;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr phdr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;

	rc = update_note_header_size_elf64(ehdr_ptr);
	if (rc < 0)
		return rc;

	rc = get_note_number_and_size_elf64(ehdr_ptr, &nr_ptnote, &phdr_sz);
	if (rc < 0)
		return rc;

	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
	*notes_buf = vmcore_alloc_buf(*notes_sz);
	if (!*notes_buf)
		return -ENOMEM;

	rc = copy_notes_elf64(ehdr_ptr, *notes_buf);
	if (rc < 0)
		return rc;

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type    = PT_NOTE;
	phdr.p_flags   = 0;
	note_off = sizeof(Elf64_Ehdr) +
			(ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf64_Phdr);
	phdr.p_offset  = roundup(note_off, PAGE_SIZE);
	phdr.p_vaddr   = phdr.p_paddr = 0;
	phdr.p_filesz  = phdr.p_memsz = phdr_sz;
	phdr.p_align   = 0;

	/* Add merged PT_NOTE program header. */
	tmp = elfptr + sizeof(Elf64_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf64_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp + i, ((*elfsz) - sizeof(Elf64_Ehdr) - sizeof(Elf64_Phdr)));
	memset(elfptr + *elfsz, 0, i);
	*elfsz = roundup(*elfsz, PAGE_SIZE);

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	/* Store the size of all notes. We need this to update the note
	 * header when device dumps are added.
	 */
	elfnotes_orig_sz = phdr.p_memsz;

	return 0;
}
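
/*
 * Resulting file layout after the merge (sketch, offsets page-aligned):
 *
 *	+------------------+  offset 0
 *	| Elf64_Ehdr       |
 *	| Elf64_Phdr[]     |  one merged PT_NOTE + the PT_LOAD entries
 *	+------------------+  elfcorebuf_sz
 *	| note segment     |  device dump notes first, then old-kernel notes
 *	+------------------+  elfcorebuf_sz + elfnotes_sz
 *	| memory chunks    |  vmcore_list entries, in PT_LOAD order
 *	+------------------+
 */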

/**
 * update_note_header_size_elf32 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates the p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to the real size of the
 * ELF note segment.
 */
static int __init update_note_header_size_elf32(const Elf32_Ehdr *ehdr_ptr)
{
	int i, rc = 0;
	Elf32_Phdr *phdr_ptr;
	Elf32_Nhdr *nhdr_ptr;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		void *notes_section;
		u64 offset, max_sz, sz, real_sz = 0;

		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		while (nhdr_ptr->n_namesz != 0) {
			sz = sizeof(Elf32_Nhdr) +
				(((u64)nhdr_ptr->n_namesz + 3) & ~3) +
				(((u64)nhdr_ptr->n_descsz + 3) & ~3);
			if ((real_sz + sz) > max_sz) {
				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
				break;
			}
			real_sz += sz;
			nhdr_ptr = (Elf32_Nhdr *)((char *)nhdr_ptr + sz);
		}
		kfree(notes_section);
		phdr_ptr->p_memsz = real_sz;
		if (real_sz == 0)
			pr_warn("Warning: Zero PT_NOTE entries found\n");
	}

	return 0;
}

/**
 * get_note_number_and_size_elf32 - get the number of PT_NOTE program
 * headers and the sum of the real sizes of their ELF note segment
 * headers and data.
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for the size of the unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a unique single one. The resulting unique entry will have
 * @sz_ptnote in its phdr->p_memsz.
 *
 * It is assumed that the PT_NOTE program headers pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf32
 * and that each PT_NOTE program header has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init get_note_number_and_size_elf32(const Elf32_Ehdr *ehdr_ptr,
						 int *nr_ptnote, u64 *sz_ptnote)
{
	int i;
	Elf32_Phdr *phdr_ptr;

	*nr_ptnote = *sz_ptnote = 0;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		*nr_ptnote += 1;
		*sz_ptnote += phdr_ptr->p_memsz;
	}

	return 0;
}

/**
 * copy_notes_elf32 - copy ELF note segments into a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function is used to copy the ELF note segments of the 1st kernel
 * into the buffer @notes_buf in the 2nd kernel. It is assumed that the
 * size of the buffer @notes_buf is equal to or larger than the sum of
 * the real ELF note segment headers and data.
 *
 * It is assumed that the PT_NOTE program headers pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf32
 * and that each PT_NOTE program header has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init copy_notes_elf32(const Elf32_Ehdr *ehdr_ptr, char *notes_buf)
{
	int i, rc = 0;
	Elf32_Phdr *phdr_ptr;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 offset;

		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		offset = phdr_ptr->p_offset;
		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
					   &offset);
		if (rc < 0)
			return rc;
		notes_buf += phdr_ptr->p_memsz;
	}

	return 0;
}

/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz,
					   char **notes_buf, size_t *notes_sz)
{
	int i, nr_ptnote = 0, rc = 0;
	char *tmp;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr phdr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;

	rc = update_note_header_size_elf32(ehdr_ptr);
	if (rc < 0)
		return rc;

	rc = get_note_number_and_size_elf32(ehdr_ptr, &nr_ptnote, &phdr_sz);
	if (rc < 0)
		return rc;

	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
	*notes_buf = vmcore_alloc_buf(*notes_sz);
	if (!*notes_buf)
		return -ENOMEM;

	rc = copy_notes_elf32(ehdr_ptr, *notes_buf);
	if (rc < 0)
		return rc;

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type    = PT_NOTE;
	phdr.p_flags   = 0;
	note_off = sizeof(Elf32_Ehdr) +
			(ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf32_Phdr);
	phdr.p_offset  = roundup(note_off, PAGE_SIZE);
	phdr.p_vaddr   = phdr.p_paddr = 0;
	phdr.p_filesz  = phdr.p_memsz = phdr_sz;
	phdr.p_align   = 0;

	/* Add merged PT_NOTE program header. */
	tmp = elfptr + sizeof(Elf32_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf32_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp + i, ((*elfsz) - sizeof(Elf32_Ehdr) - sizeof(Elf32_Phdr)));
	memset(elfptr + *elfsz, 0, i);
	*elfsz = roundup(*elfsz, PAGE_SIZE);

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	/* Store the size of all notes. We need this to update the note
	 * header when device dumps are added.
	 */
	elfnotes_orig_sz = phdr.p_memsz;

	return 0;
}

/* Add memory chunks represented by program headers to the vmcore list. Also
 * update the new offset fields of the exported program headers.
 */
static int __init process_ptload_program_headers_elf64(char *elfptr,
						size_t elfsz,
						size_t elfnotes_sz,
						struct list_head *vc_list)
{
	int i;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;
	phdr_ptr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr)); /* PT_NOTE hdr */

	/* Skip ELF header, program headers and ELF note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 paddr, start, end, size;

		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		paddr = phdr_ptr->p_offset;
		start = rounddown(paddr, PAGE_SIZE);
		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
		size = end - start;

		/* Add this contiguous chunk of memory to the vmcore list. */
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = start;
		new->size = size;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset. */
		phdr_ptr->p_offset = vmcore_off + (paddr - start);
		vmcore_off = vmcore_off + size;
	}
	return 0;
}

static int __init process_ptload_program_headers_elf32(char *elfptr,
						size_t elfsz,
						size_t elfnotes_sz,
						struct list_head *vc_list)
{
	int i;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;
	phdr_ptr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr)); /* PT_NOTE hdr */

	/* Skip ELF header, program headers and ELF note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 paddr, start, end, size;

		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		paddr = phdr_ptr->p_offset;
		start = rounddown(paddr, PAGE_SIZE);
		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
		size = end - start;

		/* Add this contiguous chunk of memory to the vmcore list. */
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = start;
		new->size = size;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset. */
		phdr_ptr->p_offset = vmcore_off + (paddr - start);
		vmcore_off = vmcore_off + size;
	}
	return 0;
}
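
/*
 * Offset rewriting in the two functions above, with concrete numbers
 * (illustrative): a PT_LOAD entry with p_offset (a physical address here)
 * 0x10000200 and p_memsz 0x500 is rounded out to the chunk
 * [0x10000000, 0x10001000). With a running vmcore_off of, say, 0x3000,
 * the chunk is exported at file offset 0x3000 and the entry's new
 * p_offset becomes 0x3200, preserving the sub-page offset of the data.
 */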

/* Sets the offset fields of the vmcore elements. */
static void set_vmcore_list_offsets(size_t elfsz, size_t elfnotes_sz,
				    struct list_head *vc_list)
{
	loff_t vmcore_off;
	struct vmcore *m;

	/* Skip ELF header, program headers and ELF note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	list_for_each_entry(m, vc_list, list) {
		m->offset = vmcore_off;
		vmcore_off += m->size;
	}
}

static void free_elfcorebuf(void)
{
	free_pages((unsigned long)elfcorebuf, get_order(elfcorebuf_sz_orig));
	elfcorebuf = NULL;
	vfree(elfnotes_buf);
	elfnotes_buf = NULL;
}

static int __init parse_crash_elf64_headers(void)
{
	int rc = 0;
	Elf64_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read ELF header */
	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf64_Ehdr), &addr);
	if (rc < 0)
		return rc;

	/* Do some basic verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
		(ehdr.e_type != ET_CORE) ||
		!vmcore_elf64_check_arch(&ehdr) ||
		ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
		ehdr.e_version != EV_CURRENT ||
		ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
		ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
		ehdr.e_phnum == 0) {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Read in all ELF headers. */
	elfcorebuf_sz_orig = sizeof(Elf64_Ehdr) +
				ehdr.e_phnum * sizeof(Elf64_Phdr);
	elfcorebuf_sz = elfcorebuf_sz_orig;
	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					      get_order(elfcorebuf_sz_orig));
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
	if (rc < 0)
		goto fail;

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz,
				      &elfnotes_buf, &elfnotes_sz);
	if (rc)
		goto fail;
	rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
						  elfnotes_sz, &vmcore_list);
	if (rc)
		goto fail;
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
	return 0;
fail:
	free_elfcorebuf();
	return rc;
}

static int __init parse_crash_elf32_headers(void)
{
	int rc = 0;
	Elf32_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read ELF header */
	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf32_Ehdr), &addr);
	if (rc < 0)
		return rc;

	/* Do some basic verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
		(ehdr.e_type != ET_CORE) ||
		!vmcore_elf32_check_arch(&ehdr) ||
		ehdr.e_ident[EI_CLASS] != ELFCLASS32 ||
		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
		ehdr.e_version != EV_CURRENT ||
		ehdr.e_ehsize != sizeof(Elf32_Ehdr) ||
		ehdr.e_phentsize != sizeof(Elf32_Phdr) ||
		ehdr.e_phnum == 0) {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Read in all ELF headers. */
	elfcorebuf_sz_orig = sizeof(Elf32_Ehdr) + ehdr.e_phnum * sizeof(Elf32_Phdr);
	elfcorebuf_sz = elfcorebuf_sz_orig;
	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					      get_order(elfcorebuf_sz_orig));
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
	if (rc < 0)
		goto fail;

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz,
				      &elfnotes_buf, &elfnotes_sz);
	if (rc)
		goto fail;
	rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz,
						  elfnotes_sz, &vmcore_list);
	if (rc)
		goto fail;
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
	return 0;
fail:
	free_elfcorebuf();
	return rc;
}

static int __init parse_crash_elf_headers(void)
{
	unsigned char e_ident[EI_NIDENT];
	u64 addr;
	int rc = 0;

	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(e_ident, EI_NIDENT, &addr);
	if (rc < 0)
		return rc;
	if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) {
		pr_warn("Warning: Core image elf header not found\n");
		return -EINVAL;
	}

	if (e_ident[EI_CLASS] == ELFCLASS64) {
		rc = parse_crash_elf64_headers();
		if (rc)
			return rc;
	} else if (e_ident[EI_CLASS] == ELFCLASS32) {
		rc = parse_crash_elf32_headers();
		if (rc)
			return rc;
	} else {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Determine vmcore size. */
	vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
				      &vmcore_list);

	return 0;
}

#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
/**
 * vmcoredd_write_header - Write the vmcore device dump header at the
 * beginning of the dump's buffer.
 * @buf: Output buffer where the note is written
 * @data: Dump info
 * @size: Size of the dump
 *
 * Fills the beginning of the dump's buffer with the vmcore device dump header.
 */
static void vmcoredd_write_header(void *buf, struct vmcoredd_data *data,
				  u32 size)
{
	struct vmcoredd_header *vdd_hdr = (struct vmcoredd_header *)buf;

	vdd_hdr->n_namesz = sizeof(vdd_hdr->name);
	vdd_hdr->n_descsz = size + sizeof(vdd_hdr->dump_name);
	vdd_hdr->n_type = NT_VMCOREDD;

	strncpy((char *)vdd_hdr->name, VMCOREDD_NOTE_NAME,
		sizeof(vdd_hdr->name));
	memcpy(vdd_hdr->dump_name, data->dump_name, sizeof(vdd_hdr->dump_name));
}
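
/*
 * Concrete shape of the note written above (assuming the vmcoredd_header
 * layout from include/uapi/linux/vmcore.h): name[] is 8 bytes holding
 * "LINUX", so n_namesz = 8, and n_descsz = size + sizeof(dump_name); the
 * dump's name is thus the first part of the note descriptor, followed by
 * the raw data the driver writes after the header.
 */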

/**
 * vmcoredd_update_program_headers - Update all ELF program headers
 * @elfptr: Pointer to the ELF header
 * @elfnotesz: Size of the ELF notes, aligned to page size
 * @vmcoreddsz: Size of the device dumps to be added to the ELF note header
 *
 * Determine the type of ELF header (Elf64 or Elf32) and update the ELF note
 * size. Also update the offsets of all the program headers after the ELF
 * note header.
 */
static void vmcoredd_update_program_headers(char *elfptr, size_t elfnotesz,
					    size_t vmcoreddsz)
{
	unsigned char *e_ident = (unsigned char *)elfptr;
	u64 start, end, size;
	loff_t vmcore_off;
	u32 i;

	vmcore_off = elfcorebuf_sz + elfnotesz;

	if (e_ident[EI_CLASS] == ELFCLASS64) {
		Elf64_Ehdr *ehdr = (Elf64_Ehdr *)elfptr;
		Elf64_Phdr *phdr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr));

		/* Update all program headers */
		for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
			if (phdr->p_type == PT_NOTE) {
				/* Update note size */
				phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz;
				phdr->p_filesz = phdr->p_memsz;
				continue;
			}

			start = rounddown(phdr->p_offset, PAGE_SIZE);
			end = roundup(phdr->p_offset + phdr->p_memsz,
				      PAGE_SIZE);
			size = end - start;
			phdr->p_offset = vmcore_off + (phdr->p_offset - start);
			vmcore_off += size;
		}
	} else {
		Elf32_Ehdr *ehdr = (Elf32_Ehdr *)elfptr;
		Elf32_Phdr *phdr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr));

		/* Update all program headers */
		for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
			if (phdr->p_type == PT_NOTE) {
				/* Update note size */
				phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz;
				phdr->p_filesz = phdr->p_memsz;
				continue;
			}

			start = rounddown(phdr->p_offset, PAGE_SIZE);
			end = roundup(phdr->p_offset + phdr->p_memsz,
				      PAGE_SIZE);
			size = end - start;
			phdr->p_offset = vmcore_off + (phdr->p_offset - start);
			vmcore_off += size;
		}
	}
}

/**
 * vmcoredd_update_size - Update the total size of the device dumps and update
 * the ELF header
 * @dump_size: Size of the current device dump to be added to the total size
 *
 * Update the total size of all the device dumps and update the ELF program
 * headers. Calculate the new offsets for the vmcore list and update the
 * total vmcore size.
 */
static void vmcoredd_update_size(size_t dump_size)
{
	vmcoredd_orig_sz += dump_size;
	elfnotes_sz = roundup(elfnotes_orig_sz, PAGE_SIZE) + vmcoredd_orig_sz;
	vmcoredd_update_program_headers(elfcorebuf, elfnotes_sz,
					vmcoredd_orig_sz);

	/* Update vmcore list offsets */
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);

	vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
				      &vmcore_list);
	proc_vmcore->size = vmcore_size;
}
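
/*
 * Worked example (illustrative numbers): with elfnotes_orig_sz = 0x1530,
 * the merged notes occupy roundup(0x1530, PAGE_SIZE) = 0x2000 of the file.
 * Adding a 2 MiB device dump makes vmcoredd_orig_sz = 0x200000 and
 * elfnotes_sz = 0x202000, pushing every PT_LOAD offset and every
 * vmcore_list entry up by 0x200000 via vmcoredd_update_program_headers()
 * and set_vmcore_list_offsets() above.
 */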

/**
 * vmcore_add_device_dump - Add a buffer containing a device dump to vmcore
 * @data: dump info
 *
 * Allocate a buffer and invoke the calling driver's dump collect routine.
 * Write an ELF note at the beginning of the buffer to indicate the vmcore
 * device dump and add the dump to the global list.
 */
int vmcore_add_device_dump(struct vmcoredd_data *data)
{
	struct vmcoredd_node *dump;
	void *buf = NULL;
	size_t data_size;
	int ret;

	if (vmcoredd_disabled) {
		pr_err_once("Device dump is disabled\n");
		return -EINVAL;
	}

	if (!data || !strlen(data->dump_name) ||
	    !data->vmcoredd_callback || !data->size)
		return -EINVAL;

	dump = vzalloc(sizeof(*dump));
	if (!dump) {
		ret = -ENOMEM;
		goto out_err;
	}

	/* Keep the size of the buffer page aligned so that it can be mmapped */
	data_size = roundup(sizeof(struct vmcoredd_header) + data->size,
			    PAGE_SIZE);

	/* Allocate a buffer for the driver to write its dump into */
	buf = vmcore_alloc_buf(data_size);
	if (!buf) {
		ret = -ENOMEM;
		goto out_err;
	}

	vmcoredd_write_header(buf, data, data_size -
			      sizeof(struct vmcoredd_header));

	/* Invoke the driver's dump collection routine */
	ret = data->vmcoredd_callback(data, buf +
				      sizeof(struct vmcoredd_header));
	if (ret)
		goto out_err;

	dump->buf = buf;
	dump->size = data_size;

	/* Add the dump to the device dump list */
	mutex_lock(&vmcoredd_mutex);
	list_add_tail(&dump->list, &vmcoredd_list);
	mutex_unlock(&vmcoredd_mutex);

	vmcoredd_update_size(data_size);
	return 0;

out_err:
	vfree(buf);
	vfree(dump);

	return ret;
}
EXPORT_SYMBOL(vmcore_add_device_dump);
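
/*
 * Example driver usage (an illustrative sketch; the mydrv_* names are
 * hypothetical):
 *
 *	static int mydrv_dump_collect(struct vmcoredd_data *data, void *buf)
 *	{
 *		// copy up to data->size bytes of device state into buf
 *		return 0;
 *	}
 *
 *	static struct vmcoredd_data mydrv_dump = {
 *		.dump_name = "mydrv",
 *		.size = SZ_2M,
 *		.vmcoredd_callback = mydrv_dump_collect,
 *	};
 *
 *	vmcore_add_device_dump(&mydrv_dump);	// in the kdump kernel
 *
 * The resulting NT_VMCOREDD note appears in /proc/vmcore ahead of the
 * old-kernel notes, as described in __read_vmcore() above.
 */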
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

/* Free all dumps in the vmcore device dump list. */
static void vmcore_free_device_dumps(void)
{
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
	mutex_lock(&vmcoredd_mutex);
	while (!list_empty(&vmcoredd_list)) {
		struct vmcoredd_node *dump;

		dump = list_first_entry(&vmcoredd_list, struct vmcoredd_node,
					list);
		list_del(&dump->list);
		vfree(dump->buf);
		vfree(dump);
	}
	mutex_unlock(&vmcoredd_mutex);
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
}

/* Init function for vmcore module. */
static int __init vmcore_init(void)
{
	int rc = 0;

	/* Allow architectures to allocate the ELF header in the 2nd kernel */
	rc = elfcorehdr_alloc(&elfcorehdr_addr, &elfcorehdr_size);
	if (rc)
		return rc;
	/*
	 * If elfcorehdr= has been passed on the command line or the header
	 * has been created in the 2nd kernel, then capture the dump.
	 */
	if (!(is_vmcore_usable()))
		return rc;
	rc = parse_crash_elf_headers();
	if (rc) {
		pr_warn("Kdump: vmcore not initialized\n");
		return rc;
	}
	elfcorehdr_free(elfcorehdr_addr);
	elfcorehdr_addr = ELFCORE_ADDR_ERR;

	proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &vmcore_proc_ops);
	if (proc_vmcore)
		proc_vmcore->size = vmcore_size;
	return 0;
}
fs_initcall(vmcore_init);

/* Cleanup function for vmcore module. */
void vmcore_cleanup(void)
{
	if (proc_vmcore) {
		proc_remove(proc_vmcore);
		proc_vmcore = NULL;
	}

	/* clear the vmcore list. */
	while (!list_empty(&vmcore_list)) {
		struct vmcore *m;

		m = list_first_entry(&vmcore_list, struct vmcore, list);
		list_del(&m->list);
		kfree(m);
	}
	free_elfcorebuf();

	/* clear the vmcore device dump list */
	vmcore_free_device_dumps();
}