xref: /linux/fs/proc/vmcore.c (revision a634dda26186cf9a51567020fcce52bcba5e1e59)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *	fs/proc/vmcore.c Interface for accessing the crash
4  *	dump from the system's previous life.
5  *	Heavily borrowed from fs/proc/kcore.c
6  *	Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
7  *	Copyright (C) IBM Corporation, 2004. All rights reserved
8  *
9  */
10 
11 #include <linux/mm.h>
12 #include <linux/kcore.h>
13 #include <linux/user.h>
14 #include <linux/elf.h>
15 #include <linux/elfcore.h>
16 #include <linux/export.h>
17 #include <linux/slab.h>
18 #include <linux/highmem.h>
19 #include <linux/printk.h>
20 #include <linux/memblock.h>
21 #include <linux/init.h>
22 #include <linux/crash_dump.h>
23 #include <linux/list.h>
24 #include <linux/moduleparam.h>
25 #include <linux/mutex.h>
26 #include <linux/vmalloc.h>
27 #include <linux/pagemap.h>
28 #include <linux/uio.h>
29 #include <linux/cc_platform.h>
30 #include <asm/io.h>
31 #include "internal.h"
32 
33 /* List representing chunks of contiguous memory areas and their offsets in
34  * the vmcore file.
35  */
36 static LIST_HEAD(vmcore_list);
37 
38 /* Stores the pointer to the buffer containing the kernel ELF core headers. */
39 static char *elfcorebuf;
40 static size_t elfcorebuf_sz;
41 static size_t elfcorebuf_sz_orig;
42 
43 static char *elfnotes_buf;
44 static size_t elfnotes_sz;
45 /* Size of all notes minus the device dump notes */
46 static size_t elfnotes_orig_sz;
47 
48 /* Total size of the vmcore file. */
49 static u64 vmcore_size;
50 
51 static struct proc_dir_entry *proc_vmcore;
52 
53 #ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
54 /* Device dump list and mutex to synchronize access to the list */
55 static LIST_HEAD(vmcoredd_list);
56 static DEFINE_MUTEX(vmcoredd_mutex);
57 
58 static bool vmcoredd_disabled;
59 core_param(novmcoredd, vmcoredd_disabled, bool, 0);
60 #endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
61 
62 /* Total size of all device dumps */
63 static size_t vmcoredd_orig_sz;
64 
65 static DEFINE_SPINLOCK(vmcore_cb_lock);
66 DEFINE_STATIC_SRCU(vmcore_cb_srcu);
67 /* List of registered vmcore callbacks. */
68 static LIST_HEAD(vmcore_cb_list);
69 /* Whether the vmcore has been opened once. */
70 static bool vmcore_opened;
71 
72 void register_vmcore_cb(struct vmcore_cb *cb)
73 {
74 	INIT_LIST_HEAD(&cb->next);
75 	spin_lock(&vmcore_cb_lock);
76 	list_add_tail(&cb->next, &vmcore_cb_list);
77 	/*
78 	 * Registering a vmcore callback after the vmcore was opened is
79 	 * very unusual (e.g., manual driver loading).
80 	 */
81 	if (vmcore_opened)
82 		pr_warn_once("Unexpected vmcore callback registration\n");
83 	spin_unlock(&vmcore_cb_lock);
84 }
85 EXPORT_SYMBOL_GPL(register_vmcore_cb);
86 
87 void unregister_vmcore_cb(struct vmcore_cb *cb)
88 {
89 	spin_lock(&vmcore_cb_lock);
90 	list_del_rcu(&cb->next);
91 	/*
92 	 * Unregistering a vmcore callback after the vmcore was opened is
93 	 * very unusual (e.g., forced driver removal), but we cannot stop
94 	 * unregistering.
95 	 */
96 	if (vmcore_opened)
97 		pr_warn_once("Unexpected vmcore callback unregistration\n");
98 	spin_unlock(&vmcore_cb_lock);
99 
100 	synchronize_srcu(&vmcore_cb_srcu);
101 }
102 EXPORT_SYMBOL_GPL(unregister_vmcore_cb);
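
/*
 * Example (an illustrative sketch, not part of this file): a driver can
 * use this interface to hide device-owned pages from /proc/vmcore. The
 * my_dev_* names are hypothetical; struct vmcore_cb, register_vmcore_cb()
 * and unregister_vmcore_cb() are the real interface declared in
 * <linux/crash_dump.h>.
 *
 *	static bool my_dev_pfn_is_ram(struct vmcore_cb *cb, unsigned long pfn)
 *	{
 *		return !my_dev_owns_pfn(pfn);
 *	}
 *
 *	static struct vmcore_cb my_dev_vmcore_cb = {
 *		.pfn_is_ram = my_dev_pfn_is_ram,
 *	};
 *
 * The driver would call register_vmcore_cb(&my_dev_vmcore_cb) at probe
 * time and unregister_vmcore_cb(&my_dev_vmcore_cb) on removal; ranges for
 * which the callback returns false are read back as zeroes.
 */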
103 
104 static bool pfn_is_ram(unsigned long pfn)
105 {
106 	struct vmcore_cb *cb;
107 	bool ret = true;
108 
109 	list_for_each_entry_srcu(cb, &vmcore_cb_list, next,
110 				 srcu_read_lock_held(&vmcore_cb_srcu)) {
111 		if (unlikely(!cb->pfn_is_ram))
112 			continue;
113 		ret = cb->pfn_is_ram(cb, pfn);
114 		if (!ret)
115 			break;
116 	}
117 
118 	return ret;
119 }
120 
121 static int open_vmcore(struct inode *inode, struct file *file)
122 {
123 	spin_lock(&vmcore_cb_lock);
124 	vmcore_opened = true;
125 	spin_unlock(&vmcore_cb_lock);
126 
127 	return 0;
128 }
129 
130 /* Reads from the oldmem device, page by page, starting at the given offset. */
131 ssize_t read_from_oldmem(struct iov_iter *iter, size_t count,
132 			 u64 *ppos, bool encrypted)
133 {
134 	unsigned long pfn, offset;
135 	ssize_t nr_bytes;
136 	ssize_t read = 0, tmp;
137 	int idx;
138 
139 	if (!count)
140 		return 0;
141 
142 	offset = (unsigned long)(*ppos % PAGE_SIZE);
143 	pfn = (unsigned long)(*ppos / PAGE_SIZE);
144 
145 	idx = srcu_read_lock(&vmcore_cb_srcu);
146 	do {
147 		if (count > (PAGE_SIZE - offset))
148 			nr_bytes = PAGE_SIZE - offset;
149 		else
150 			nr_bytes = count;
151 
152 		/* If the pfn is not RAM, return zeros for sparse dump files */
153 		if (!pfn_is_ram(pfn)) {
154 			tmp = iov_iter_zero(nr_bytes, iter);
155 		} else {
156 			if (encrypted)
157 				tmp = copy_oldmem_page_encrypted(iter, pfn,
158 								 nr_bytes,
159 								 offset);
160 			else
161 				tmp = copy_oldmem_page(iter, pfn, nr_bytes,
162 						       offset);
163 		}
164 		if (tmp < nr_bytes) {
165 			srcu_read_unlock(&vmcore_cb_srcu, idx);
166 			return -EFAULT;
167 		}
168 
169 		*ppos += nr_bytes;
170 		count -= nr_bytes;
171 		read += nr_bytes;
172 		++pfn;
173 		offset = 0;
174 	} while (count);
175 	srcu_read_unlock(&vmcore_cb_srcu, idx);
176 
177 	return read;
178 }
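
/*
 * A worked example of the paging arithmetic above (made-up values, 4 KiB
 * pages): for *ppos = 0x1234 and count = 0x2000, the first iteration
 * copies from pfn 1 at offset 0x234 for PAGE_SIZE - 0x234 = 0xdcc bytes,
 * the second copies all of pfn 2 (0x1000 bytes), and the third copies the
 * remaining 0x234 bytes from the start of pfn 3.
 */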
179 
180 /*
181  * Architectures may override this function to allocate the ELF header in the 2nd kernel
182  */
183 int __weak elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
184 {
185 	return 0;
186 }
187 
188 /*
189  * Architectures may override this function to free the ELF header
190  */
191 void __weak elfcorehdr_free(unsigned long long addr)
192 {}
193 
194 /*
195  * Architectures may override this function to read from the ELF header
196  */
197 ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
198 {
199 	struct kvec kvec = { .iov_base = buf, .iov_len = count };
200 	struct iov_iter iter;
201 
202 	iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, count);
203 
204 	return read_from_oldmem(&iter, count, ppos, false);
205 }
206 
207 /*
208  * Architectures may override this function to read from the ELF note segments
209  */
210 ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
211 {
212 	struct kvec kvec = { .iov_base = buf, .iov_len = count };
213 	struct iov_iter iter;
214 
215 	iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, count);
216 
217 	return read_from_oldmem(&iter, count, ppos,
218 			cc_platform_has(CC_ATTR_MEM_ENCRYPT));
219 }
220 
221 /*
222  * Architectures may override this function to map oldmem
223  */
224 int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
225 				  unsigned long from, unsigned long pfn,
226 				  unsigned long size, pgprot_t prot)
227 {
228 	prot = pgprot_encrypted(prot);
229 	return remap_pfn_range(vma, from, pfn, size, prot);
230 }
231 
232 /*
233  * Architectures which support memory encryption override this.
234  */
235 ssize_t __weak copy_oldmem_page_encrypted(struct iov_iter *iter,
236 		unsigned long pfn, size_t csize, unsigned long offset)
237 {
238 	return copy_oldmem_page(iter, pfn, csize, offset);
239 }
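
/*
 * A sketch of what such an override can look like, modeled loosely on the
 * x86 implementation (illustrative only; details differ per architecture
 * and kernel version). The old page is temporarily mapped with an
 * encrypted mapping before being copied out:
 *
 *	ssize_t copy_oldmem_page_encrypted(struct iov_iter *iter,
 *			unsigned long pfn, size_t csize, unsigned long offset)
 *	{
 *		void *vaddr;
 *
 *		if (!csize)
 *			return 0;
 *
 *		vaddr = (__force void *)ioremap_encrypted(pfn << PAGE_SHIFT,
 *							  PAGE_SIZE);
 *		if (!vaddr)
 *			return -ENOMEM;
 *
 *		csize = copy_to_iter(vaddr + offset, csize, iter);
 *
 *		iounmap((void __iomem *)vaddr);
 *		return csize;
 *	}
 */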
240 
241 #ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
242 static int vmcoredd_copy_dumps(struct iov_iter *iter, u64 start, size_t size)
243 {
244 	struct vmcoredd_node *dump;
245 	u64 offset = 0;
246 	int ret = 0;
247 	size_t tsz;
248 	char *buf;
249 
250 	mutex_lock(&vmcoredd_mutex);
251 	list_for_each_entry(dump, &vmcoredd_list, list) {
252 		if (start < offset + dump->size) {
253 			tsz = min(offset + (u64)dump->size - start, (u64)size);
254 			buf = dump->buf + start - offset;
255 			if (copy_to_iter(buf, tsz, iter) < tsz) {
256 				ret = -EFAULT;
257 				goto out_unlock;
258 			}
259 
260 			size -= tsz;
261 			start += tsz;
262 
263 			/* Leave now if the buffer is already filled */
264 			if (!size)
265 				goto out_unlock;
266 		}
267 		offset += dump->size;
268 	}
269 
270 out_unlock:
271 	mutex_unlock(&vmcoredd_mutex);
272 	return ret;
273 }
274 
275 #ifdef CONFIG_MMU
276 static int vmcoredd_mmap_dumps(struct vm_area_struct *vma, unsigned long dst,
277 			       u64 start, size_t size)
278 {
279 	struct vmcoredd_node *dump;
280 	u64 offset = 0;
281 	int ret = 0;
282 	size_t tsz;
283 	char *buf;
284 
285 	mutex_lock(&vmcoredd_mutex);
286 	list_for_each_entry(dump, &vmcoredd_list, list) {
287 		if (start < offset + dump->size) {
288 			tsz = min(offset + (u64)dump->size - start, (u64)size);
289 			buf = dump->buf + start - offset;
290 			if (remap_vmalloc_range_partial(vma, dst, buf, 0,
291 							tsz)) {
292 				ret = -EFAULT;
293 				goto out_unlock;
294 			}
295 
296 			size -= tsz;
297 			start += tsz;
298 			dst += tsz;
299 
300 			/* Leave now if the buffer is already filled */
301 			if (!size)
302 				goto out_unlock;
303 		}
304 		offset += dump->size;
305 	}
306 
307 out_unlock:
308 	mutex_unlock(&vmcoredd_mutex);
309 	return ret;
310 }
311 #endif /* CONFIG_MMU */
312 #endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
313 
314 /* Read from the ELF header and then the crash dump. On error, a negative
315  * value is returned; otherwise, the number of bytes read is returned.
316  */
317 static ssize_t __read_vmcore(struct iov_iter *iter, loff_t *fpos)
318 {
319 	ssize_t acc = 0, tmp;
320 	size_t tsz;
321 	u64 start;
322 	struct vmcore *m = NULL;
323 
324 	if (!iov_iter_count(iter) || *fpos >= vmcore_size)
325 		return 0;
326 
327 	iov_iter_truncate(iter, vmcore_size - *fpos);
328 
329 	/* Read ELF core header */
330 	if (*fpos < elfcorebuf_sz) {
331 		tsz = min(elfcorebuf_sz - (size_t)*fpos, iov_iter_count(iter));
332 		if (copy_to_iter(elfcorebuf + *fpos, tsz, iter) < tsz)
333 			return -EFAULT;
334 		*fpos += tsz;
335 		acc += tsz;
336 
337 		/* leave now if the buffer is already filled */
338 		if (!iov_iter_count(iter))
339 			return acc;
340 	}
341 
342 	/* Read ELF note segment */
343 	if (*fpos < elfcorebuf_sz + elfnotes_sz) {
344 		void *kaddr;
345 
346 		/* We add device dumps before other elf notes because the
347 		 * other elf notes may not fill the elf notes buffer
348 		 * completely and we will end up with zero-filled data
349 		 * between the elf notes and the device dumps. Tools will
350 		 * then try to decode this zero-filled data as valid notes
351 		 * and we don't want that. Hence, adding device dumps before
352 		 * the other elf notes ensures that the zero-filled data is
353 		 * avoided.
354 		 */
355 #ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
356 		/* Read device dumps */
357 		if (*fpos < elfcorebuf_sz + vmcoredd_orig_sz) {
358 			tsz = min(elfcorebuf_sz + vmcoredd_orig_sz -
359 				  (size_t)*fpos, iov_iter_count(iter));
360 			start = *fpos - elfcorebuf_sz;
361 			if (vmcoredd_copy_dumps(iter, start, tsz))
362 				return -EFAULT;
363 
364 			*fpos += tsz;
365 			acc += tsz;
366 
367 			/* leave now if the buffer is already filled */
368 			if (!iov_iter_count(iter))
369 				return acc;
370 		}
371 #endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
372 
373 		/* Read remaining elf notes */
374 		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)*fpos,
375 			  iov_iter_count(iter));
376 		kaddr = elfnotes_buf + *fpos - elfcorebuf_sz - vmcoredd_orig_sz;
377 		if (copy_to_iter(kaddr, tsz, iter) < tsz)
378 			return -EFAULT;
379 
380 		*fpos += tsz;
381 		acc += tsz;
382 
383 		/* leave now if the buffer is already filled */
384 		if (!iov_iter_count(iter))
385 			return acc;
386 
387 		cond_resched();
388 	}
389 
390 	list_for_each_entry(m, &vmcore_list, list) {
391 		if (*fpos < m->offset + m->size) {
392 			tsz = (size_t)min_t(unsigned long long,
393 					    m->offset + m->size - *fpos,
394 					    iov_iter_count(iter));
395 			start = m->paddr + *fpos - m->offset;
396 			tmp = read_from_oldmem(iter, tsz, &start,
397 					cc_platform_has(CC_ATTR_MEM_ENCRYPT));
398 			if (tmp < 0)
399 				return tmp;
400 			*fpos += tsz;
401 			acc += tsz;
402 
403 			/* leave now if the buffer is already filled */
404 			if (!iov_iter_count(iter))
405 				return acc;
406 		}
407 
408 		cond_resched();
409 	}
410 
411 	return acc;
412 }
413 
414 static ssize_t read_vmcore(struct kiocb *iocb, struct iov_iter *iter)
415 {
416 	return __read_vmcore(iter, &iocb->ki_pos);
417 }
418 
419 /**
420  * vmcore_alloc_buf - allocate buffer in vmalloc memory
421  * @size: size of buffer
422  *
423  * If CONFIG_MMU is defined, use vmalloc_user() to allow users to mmap
424  * the buffer to user-space by means of remap_vmalloc_range().
425  *
426  * If CONFIG_MMU is not defined, use vzalloc() since mmap_vmcore() is
427  * disabled and there's no need to allow users to mmap the buffer.
428  */
429 static inline char *vmcore_alloc_buf(size_t size)
430 {
431 #ifdef CONFIG_MMU
432 	return vmalloc_user(size);
433 #else
434 	return vzalloc(size);
435 #endif
436 }
437 
438 /*
439  * Disable mmap_vmcore() if CONFIG_MMU is not defined. MMU is
440  * essential for mmap_vmcore() in order to map physically
441  * non-contiguous objects (ELF header, ELF note segment and memory
442  * regions in the 1st kernel pointed to by PT_LOAD entries) into
443  * virtually contiguous user-space in ELF layout.
444  */
445 #ifdef CONFIG_MMU
446 
447 /*
448  * The vmcore fault handler uses the page cache and fills data using the
449  * standard __read_vmcore() function.
450  *
451  * On s390 the fault handler is used for memory regions that can't be mapped
452  * directly with remap_pfn_range().
453  */
454 static vm_fault_t mmap_vmcore_fault(struct vm_fault *vmf)
455 {
456 #ifdef CONFIG_S390
457 	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
458 	pgoff_t index = vmf->pgoff;
459 	struct iov_iter iter;
460 	struct kvec kvec;
461 	struct page *page;
462 	loff_t offset;
463 	int rc;
464 
465 	page = find_or_create_page(mapping, index, GFP_KERNEL);
466 	if (!page)
467 		return VM_FAULT_OOM;
468 	if (!PageUptodate(page)) {
469 		offset = (loff_t) index << PAGE_SHIFT;
470 		kvec.iov_base = page_address(page);
471 		kvec.iov_len = PAGE_SIZE;
472 		iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, PAGE_SIZE);
473 
474 		rc = __read_vmcore(&iter, &offset);
475 		if (rc < 0) {
476 			unlock_page(page);
477 			put_page(page);
478 			return vmf_error(rc);
479 		}
480 		SetPageUptodate(page);
481 	}
482 	unlock_page(page);
483 	vmf->page = page;
484 	return 0;
485 #else
486 	return VM_FAULT_SIGBUS;
487 #endif
488 }
489 
490 static const struct vm_operations_struct vmcore_mmap_ops = {
491 	.fault = mmap_vmcore_fault,
492 };
493 
494 /*
495  * remap_oldmem_pfn_checked - do remap_oldmem_pfn_range(), replacing all
496  * pages reported as not being RAM with the zero page.
497  *
498  * @vma: vm_area_struct describing requested mapping
499  * @from: start remapping from
500  * @pfn: page frame number to start remapping to
501  * @size: remapping size
502  * @prot: protection bits
503  *
504  * Returns zero on success, -EAGAIN on failure.
505  */
506 static int remap_oldmem_pfn_checked(struct vm_area_struct *vma,
507 				    unsigned long from, unsigned long pfn,
508 				    unsigned long size, pgprot_t prot)
509 {
510 	unsigned long map_size;
511 	unsigned long pos_start, pos_end, pos;
512 	unsigned long zeropage_pfn = my_zero_pfn(0);
513 	size_t len = 0;
514 
515 	pos_start = pfn;
516 	pos_end = pfn + (size >> PAGE_SHIFT);
517 
518 	for (pos = pos_start; pos < pos_end; ++pos) {
519 		if (!pfn_is_ram(pos)) {
520 			/*
521 			 * We hit a page which is not RAM. Remap the contiguous
522 			 * region between pos_start and pos-1 and replace
523 			 * the non-RAM page at pos with the zero page.
524 			 */
525 			if (pos > pos_start) {
526 				/* Remap the contiguous region */
527 				map_size = (pos - pos_start) << PAGE_SHIFT;
528 				if (remap_oldmem_pfn_range(vma, from + len,
529 							   pos_start, map_size,
530 							   prot))
531 					goto fail;
532 				len += map_size;
533 			}
534 			/* Remap the zero page */
535 			if (remap_oldmem_pfn_range(vma, from + len,
536 						   zeropage_pfn,
537 						   PAGE_SIZE, prot))
538 				goto fail;
539 			len += PAGE_SIZE;
540 			pos_start = pos + 1;
541 		}
542 	}
543 	if (pos > pos_start) {
544 		/* Remap the rest */
545 		map_size = (pos - pos_start) << PAGE_SHIFT;
546 		if (remap_oldmem_pfn_range(vma, from + len, pos_start,
547 					   map_size, prot))
548 			goto fail;
549 	}
550 	return 0;
551 fail:
552 	do_munmap(vma->vm_mm, from, len, NULL);
553 	return -EAGAIN;
554 }
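
/*
 * A worked example of the loop above (made-up pfns): for pfns 100..103
 * where only pfn 102 is reported as not being RAM, three remaps are
 * issued: pfns 100-101 as one contiguous region, the shared zero page in
 * place of pfn 102, and finally pfn 103 via the "Remap the rest" path
 * after the loop.
 */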
555 
556 static int vmcore_remap_oldmem_pfn(struct vm_area_struct *vma,
557 			    unsigned long from, unsigned long pfn,
558 			    unsigned long size, pgprot_t prot)
559 {
560 	int ret, idx;
561 
562 	/*
563 	 * Check if a callback was registered to avoid looping over all
564 	 * pages without a reason.
565 	 */
566 	idx = srcu_read_lock(&vmcore_cb_srcu);
567 	if (!list_empty(&vmcore_cb_list))
568 		ret = remap_oldmem_pfn_checked(vma, from, pfn, size, prot);
569 	else
570 		ret = remap_oldmem_pfn_range(vma, from, pfn, size, prot);
571 	srcu_read_unlock(&vmcore_cb_srcu, idx);
572 	return ret;
573 }
574 
575 static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
576 {
577 	size_t size = vma->vm_end - vma->vm_start;
578 	u64 start, end, len, tsz;
579 	struct vmcore *m;
580 
581 	start = (u64)vma->vm_pgoff << PAGE_SHIFT;
582 	end = start + size;
583 
584 	if (size > vmcore_size || end > vmcore_size)
585 		return -EINVAL;
586 
587 	if (vma->vm_flags & (VM_WRITE | VM_EXEC))
588 		return -EPERM;
589 
590 	vm_flags_mod(vma, VM_MIXEDMAP, VM_MAYWRITE | VM_MAYEXEC);
591 	vma->vm_ops = &vmcore_mmap_ops;
592 
593 	len = 0;
594 
595 	if (start < elfcorebuf_sz) {
596 		u64 pfn;
597 
598 		tsz = min(elfcorebuf_sz - (size_t)start, size);
599 		pfn = __pa(elfcorebuf + start) >> PAGE_SHIFT;
600 		if (remap_pfn_range(vma, vma->vm_start, pfn, tsz,
601 				    vma->vm_page_prot))
602 			return -EAGAIN;
603 		size -= tsz;
604 		start += tsz;
605 		len += tsz;
606 
607 		if (size == 0)
608 			return 0;
609 	}
610 
611 	if (start < elfcorebuf_sz + elfnotes_sz) {
612 		void *kaddr;
613 
614 		/* We add device dumps before other elf notes because the
615 		 * other elf notes may not fill the elf notes buffer
616 		 * completely and we will end up with zero-filled data
617 		 * between the elf notes and the device dumps. Tools will
618 		 * then try to decode this zero-filled data as valid notes
619 		 * and we don't want that. Hence, adding device dumps before
620 		 * the other elf notes ensures that the zero-filled data is
621 		 * avoided. This also ensures that the device dumps and
622 		 * other elf notes can be properly mmapped at page-aligned
623 		 * addresses.
624 		 */
625 #ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
626 		/* Read device dumps */
627 		if (start < elfcorebuf_sz + vmcoredd_orig_sz) {
628 			u64 start_off;
629 
630 			tsz = min(elfcorebuf_sz + vmcoredd_orig_sz -
631 				  (size_t)start, size);
632 			start_off = start - elfcorebuf_sz;
633 			if (vmcoredd_mmap_dumps(vma, vma->vm_start + len,
634 						start_off, tsz))
635 				goto fail;
636 
637 			size -= tsz;
638 			start += tsz;
639 			len += tsz;
640 
641 			/* leave now if the buffer is already filled */
642 			if (!size)
643 				return 0;
644 		}
645 #endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
646 
647 		/* Read remaining elf notes */
648 		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)start, size);
649 		kaddr = elfnotes_buf + start - elfcorebuf_sz - vmcoredd_orig_sz;
650 		if (remap_vmalloc_range_partial(vma, vma->vm_start + len,
651 						kaddr, 0, tsz))
652 			goto fail;
653 
654 		size -= tsz;
655 		start += tsz;
656 		len += tsz;
657 
658 		if (size == 0)
659 			return 0;
660 	}
661 
662 	list_for_each_entry(m, &vmcore_list, list) {
663 		if (start < m->offset + m->size) {
664 			u64 paddr = 0;
665 
666 			tsz = (size_t)min_t(unsigned long long,
667 					    m->offset + m->size - start, size);
668 			paddr = m->paddr + start - m->offset;
669 			if (vmcore_remap_oldmem_pfn(vma, vma->vm_start + len,
670 						    paddr >> PAGE_SHIFT, tsz,
671 						    vma->vm_page_prot))
672 				goto fail;
673 			size -= tsz;
674 			start += tsz;
675 			len += tsz;
676 
677 			if (size == 0)
678 				return 0;
679 		}
680 	}
681 
682 	return 0;
683 fail:
684 	do_munmap(vma->vm_mm, vma->vm_start, len, NULL);
685 	return -EAGAIN;
686 }
687 #else
688 static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
689 {
690 	return -ENOSYS;
691 }
692 #endif
693 
694 static const struct proc_ops vmcore_proc_ops = {
695 	.proc_open	= open_vmcore,
696 	.proc_read_iter	= read_vmcore,
697 	.proc_lseek	= default_llseek,
698 	.proc_mmap	= mmap_vmcore,
699 };
700 
701 static struct vmcore* __init get_new_element(void)
702 {
703 	return kzalloc(sizeof(struct vmcore), GFP_KERNEL);
704 }
705 
706 static u64 get_vmcore_size(size_t elfsz, size_t elfnotesegsz,
707 			   struct list_head *vc_list)
708 {
709 	u64 size;
710 	struct vmcore *m;
711 
712 	size = elfsz + elfnotesegsz;
713 	list_for_each_entry(m, vc_list, list) {
714 		size += m->size;
715 	}
716 	return size;
717 }
718 
719 /**
720  * update_note_header_size_elf64 - update p_memsz member of each PT_NOTE entry
721  *
722  * @ehdr_ptr: ELF header
723  *
724  * This function updates the p_memsz member of each PT_NOTE entry in
725  * the program header table pointed to by @ehdr_ptr to the real size
726  * of the ELF note segment.
727  */
728 static int __init update_note_header_size_elf64(const Elf64_Ehdr *ehdr_ptr)
729 {
730 	int i, rc = 0;
731 	Elf64_Phdr *phdr_ptr;
732 	Elf64_Nhdr *nhdr_ptr;
733 
734 	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
735 	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
736 		void *notes_section;
737 		u64 offset, max_sz, sz, real_sz = 0;
738 		if (phdr_ptr->p_type != PT_NOTE)
739 			continue;
740 		max_sz = phdr_ptr->p_memsz;
741 		offset = phdr_ptr->p_offset;
742 		notes_section = kmalloc(max_sz, GFP_KERNEL);
743 		if (!notes_section)
744 			return -ENOMEM;
745 		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
746 		if (rc < 0) {
747 			kfree(notes_section);
748 			return rc;
749 		}
750 		nhdr_ptr = notes_section;
751 		while (nhdr_ptr->n_namesz != 0) {
752 			sz = sizeof(Elf64_Nhdr) +
753 				(((u64)nhdr_ptr->n_namesz + 3) & ~3) +
754 				(((u64)nhdr_ptr->n_descsz + 3) & ~3);
755 			if ((real_sz + sz) > max_sz) {
756 				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
757 					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
758 				break;
759 			}
760 			real_sz += sz;
761 			nhdr_ptr = (Elf64_Nhdr *)((char *)nhdr_ptr + sz);
762 		}
763 		kfree(notes_section);
764 		phdr_ptr->p_memsz = real_sz;
765 		if (real_sz == 0) {
766 			pr_warn("Warning: Zero PT_NOTE entries found\n");
767 		}
768 	}
769 
770 	return 0;
771 }
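
/*
 * The note size arithmetic above, worked through with made-up numbers:
 * an Elf64_Nhdr is 12 bytes, and both the name and the descriptor are
 * padded to 4-byte boundaries. For a "CORE" note with n_namesz = 5 and,
 * say, n_descsz = 336, the entry consumes 12 + roundup(5, 4) +
 * roundup(336, 4) = 12 + 8 + 336 = 356 bytes, which is what the
 * (x + 3) & ~3 masking computes.
 */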
772 
773 /**
774  * get_note_number_and_size_elf64 - get the number of PT_NOTE program
775  * headers and sum of real size of their ELF note segment headers and
776  * data.
777  *
778  * @ehdr_ptr: ELF header
779  * @nr_ptnote: buffer for the number of PT_NOTE program headers
780  * @sz_ptnote: buffer for size of unique PT_NOTE program header
781  *
782  * This function is used to merge multiple PT_NOTE program headers
783  * into a single unique one. The resulting unique entry will have
784  * @sz_ptnote in its phdr->p_memsz.
785  *
786  * It is assumed that the PT_NOTE program headers pointed to by
787  * @ehdr_ptr have already been updated by update_note_header_size_elf64
788  * and that each PT_NOTE program header has the actual ELF note segment
789  * size in its p_memsz member.
790  */
791 static int __init get_note_number_and_size_elf64(const Elf64_Ehdr *ehdr_ptr,
792 						 int *nr_ptnote, u64 *sz_ptnote)
793 {
794 	int i;
795 	Elf64_Phdr *phdr_ptr;
796 
797 	*nr_ptnote = *sz_ptnote = 0;
798 
799 	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
800 	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
801 		if (phdr_ptr->p_type != PT_NOTE)
802 			continue;
803 		*nr_ptnote += 1;
804 		*sz_ptnote += phdr_ptr->p_memsz;
805 	}
806 
807 	return 0;
808 }
809 
810 /**
811  * copy_notes_elf64 - copy ELF note segments into a given buffer
812  *
813  * @ehdr_ptr: ELF header
814  * @notes_buf: buffer into which ELF note segments are copied
815  *
816  * This function is used to copy the ELF note segments from the 1st
817  * kernel into the buffer @notes_buf in the 2nd kernel. It is assumed
818  * that the size of the buffer @notes_buf is equal to or larger than the
819  * sum of the real ELF note segment headers and data.
820  *
821  * It is assumed that the PT_NOTE program headers pointed to by
822  * @ehdr_ptr have already been updated by update_note_header_size_elf64
823  * and that each PT_NOTE program header has the actual ELF note segment
824  * size in its p_memsz member.
825  */
826 static int __init copy_notes_elf64(const Elf64_Ehdr *ehdr_ptr, char *notes_buf)
827 {
828 	int i, rc = 0;
829 	Elf64_Phdr *phdr_ptr;
830 
831 	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
832 
833 	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
834 		u64 offset;
835 		if (phdr_ptr->p_type != PT_NOTE)
836 			continue;
837 		offset = phdr_ptr->p_offset;
838 		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
839 					   &offset);
840 		if (rc < 0)
841 			return rc;
842 		notes_buf += phdr_ptr->p_memsz;
843 	}
844 
845 	return 0;
846 }
847 
848 /* Merges all the PT_NOTE headers into one. */
849 static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz,
850 					   char **notes_buf, size_t *notes_sz)
851 {
852 	int i, nr_ptnote = 0, rc = 0;
853 	char *tmp;
854 	Elf64_Ehdr *ehdr_ptr;
855 	Elf64_Phdr phdr;
856 	u64 phdr_sz = 0, note_off;
857 
858 	ehdr_ptr = (Elf64_Ehdr *)elfptr;
859 
860 	rc = update_note_header_size_elf64(ehdr_ptr);
861 	if (rc < 0)
862 		return rc;
863 
864 	rc = get_note_number_and_size_elf64(ehdr_ptr, &nr_ptnote, &phdr_sz);
865 	if (rc < 0)
866 		return rc;
867 
868 	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
869 	*notes_buf = vmcore_alloc_buf(*notes_sz);
870 	if (!*notes_buf)
871 		return -ENOMEM;
872 
873 	rc = copy_notes_elf64(ehdr_ptr, *notes_buf);
874 	if (rc < 0)
875 		return rc;
876 
877 	/* Prepare merged PT_NOTE program header. */
878 	phdr.p_type    = PT_NOTE;
879 	phdr.p_flags   = 0;
880 	note_off = sizeof(Elf64_Ehdr) +
881 			(ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf64_Phdr);
882 	phdr.p_offset  = roundup(note_off, PAGE_SIZE);
883 	phdr.p_vaddr   = phdr.p_paddr = 0;
884 	phdr.p_filesz  = phdr.p_memsz = phdr_sz;
885 	phdr.p_align   = 4;
886 
887 	/* Add merged PT_NOTE program header */
888 	tmp = elfptr + sizeof(Elf64_Ehdr);
889 	memcpy(tmp, &phdr, sizeof(phdr));
890 	tmp += sizeof(phdr);
891 
892 	/* Remove unwanted PT_NOTE program headers. */
893 	i = (nr_ptnote - 1) * sizeof(Elf64_Phdr);
894 	*elfsz = *elfsz - i;
895 	memmove(tmp, tmp + i, *elfsz - sizeof(Elf64_Ehdr) - sizeof(Elf64_Phdr));
896 	memset(elfptr + *elfsz, 0, i);
897 	*elfsz = roundup(*elfsz, PAGE_SIZE);
898 
899 	/* Modify e_phnum to reflect merged headers. */
900 	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;
901 
902 	/* Store the size of all notes. We need this to update the note
903 	 * header when device dumps are added.
904 	 */
905 	elfnotes_orig_sz = phdr.p_memsz;
906 
907 	return 0;
908 }
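
/*
 * A worked example of the merge (made-up numbers): with e_phnum = 6, of
 * which nr_ptnote = 3 are PT_NOTE, the merged table keeps 6 - 3 + 1 = 4
 * entries, so (3 - 1) * sizeof(Elf64_Phdr) = 112 bytes of headers are
 * removed. The remaining headers are shifted down behind the merged
 * PT_NOTE entry and the freed tail is zeroed before *elfsz is rounded up
 * to a page boundary.
 */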
909 
910 /**
911  * update_note_header_size_elf32 - update p_memsz member of each PT_NOTE entry
912  *
913  * @ehdr_ptr: ELF header
914  *
915  * This function updates the p_memsz member of each PT_NOTE entry in
916  * the program header table pointed to by @ehdr_ptr to the real size
917  * of the ELF note segment.
918  */
919 static int __init update_note_header_size_elf32(const Elf32_Ehdr *ehdr_ptr)
920 {
921 	int i, rc = 0;
922 	Elf32_Phdr *phdr_ptr;
923 	Elf32_Nhdr *nhdr_ptr;
924 
925 	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
926 	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
927 		void *notes_section;
928 		u64 offset, max_sz, sz, real_sz = 0;
929 		if (phdr_ptr->p_type != PT_NOTE)
930 			continue;
931 		max_sz = phdr_ptr->p_memsz;
932 		offset = phdr_ptr->p_offset;
933 		notes_section = kmalloc(max_sz, GFP_KERNEL);
934 		if (!notes_section)
935 			return -ENOMEM;
936 		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
937 		if (rc < 0) {
938 			kfree(notes_section);
939 			return rc;
940 		}
941 		nhdr_ptr = notes_section;
942 		while (nhdr_ptr->n_namesz != 0) {
943 			sz = sizeof(Elf32_Nhdr) +
944 				(((u64)nhdr_ptr->n_namesz + 3) & ~3) +
945 				(((u64)nhdr_ptr->n_descsz + 3) & ~3);
946 			if ((real_sz + sz) > max_sz) {
947 				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
948 					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
949 				break;
950 			}
951 			real_sz += sz;
952 			nhdr_ptr = (Elf32_Nhdr *)((char *)nhdr_ptr + sz);
953 		}
954 		kfree(notes_section);
955 		phdr_ptr->p_memsz = real_sz;
956 		if (real_sz == 0) {
957 			pr_warn("Warning: Zero PT_NOTE entries found\n");
958 		}
959 	}
960 
961 	return 0;
962 }
963 
964 /**
965  * get_note_number_and_size_elf32 - get the number of PT_NOTE program
966  * headers and sum of real size of their ELF note segment headers and
967  * data.
968  *
969  * @ehdr_ptr: ELF header
970  * @nr_ptnote: buffer for the number of PT_NOTE program headers
971  * @sz_ptnote: buffer for size of unique PT_NOTE program header
972  *
973  * This function is used to merge multiple PT_NOTE program headers
974  * into a single unique one. The resulting unique entry will have
975  * @sz_ptnote in its phdr->p_memsz.
976  *
977  * It is assumed that the PT_NOTE program headers pointed to by
978  * @ehdr_ptr have already been updated by update_note_header_size_elf32
979  * and that each PT_NOTE program header has the actual ELF note segment
980  * size in its p_memsz member.
981  */
982 static int __init get_note_number_and_size_elf32(const Elf32_Ehdr *ehdr_ptr,
983 						 int *nr_ptnote, u64 *sz_ptnote)
984 {
985 	int i;
986 	Elf32_Phdr *phdr_ptr;
987 
988 	*nr_ptnote = *sz_ptnote = 0;
989 
990 	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
991 	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
992 		if (phdr_ptr->p_type != PT_NOTE)
993 			continue;
994 		*nr_ptnote += 1;
995 		*sz_ptnote += phdr_ptr->p_memsz;
996 	}
997 
998 	return 0;
999 }
1000 
1001 /**
1002  * copy_notes_elf32 - copy ELF note segments into a given buffer
1003  *
1004  * @ehdr_ptr: ELF header
1005  * @notes_buf: buffer into which ELF note segments are copied
1006  *
1007  * This function is used to copy the ELF note segments from the 1st
1008  * kernel into the buffer @notes_buf in the 2nd kernel. It is assumed
1009  * that the size of the buffer @notes_buf is equal to or larger than the
1010  * sum of the real ELF note segment headers and data.
1011  *
1012  * It is assumed that the PT_NOTE program headers pointed to by
1013  * @ehdr_ptr have already been updated by update_note_header_size_elf32
1014  * and that each PT_NOTE program header has the actual ELF note segment
1015  * size in its p_memsz member.
1016  */
1017 static int __init copy_notes_elf32(const Elf32_Ehdr *ehdr_ptr, char *notes_buf)
1018 {
1019 	int i, rc = 0;
1020 	Elf32_Phdr *phdr_ptr;
1021 
1022 	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
1023 
1024 	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
1025 		u64 offset;
1026 		if (phdr_ptr->p_type != PT_NOTE)
1027 			continue;
1028 		offset = phdr_ptr->p_offset;
1029 		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
1030 					   &offset);
1031 		if (rc < 0)
1032 			return rc;
1033 		notes_buf += phdr_ptr->p_memsz;
1034 	}
1035 
1036 	return 0;
1037 }
1038 
1039 /* Merges all the PT_NOTE headers into one. */
1040 static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz,
1041 					   char **notes_buf, size_t *notes_sz)
1042 {
1043 	int i, nr_ptnote = 0, rc = 0;
1044 	char *tmp;
1045 	Elf32_Ehdr *ehdr_ptr;
1046 	Elf32_Phdr phdr;
1047 	u64 phdr_sz = 0, note_off;
1048 
1049 	ehdr_ptr = (Elf32_Ehdr *)elfptr;
1050 
1051 	rc = update_note_header_size_elf32(ehdr_ptr);
1052 	if (rc < 0)
1053 		return rc;
1054 
1055 	rc = get_note_number_and_size_elf32(ehdr_ptr, &nr_ptnote, &phdr_sz);
1056 	if (rc < 0)
1057 		return rc;
1058 
1059 	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
1060 	*notes_buf = vmcore_alloc_buf(*notes_sz);
1061 	if (!*notes_buf)
1062 		return -ENOMEM;
1063 
1064 	rc = copy_notes_elf32(ehdr_ptr, *notes_buf);
1065 	if (rc < 0)
1066 		return rc;
1067 
1068 	/* Prepare merged PT_NOTE program header. */
1069 	phdr.p_type    = PT_NOTE;
1070 	phdr.p_flags   = 0;
1071 	note_off = sizeof(Elf32_Ehdr) +
1072 			(ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf32_Phdr);
1073 	phdr.p_offset  = roundup(note_off, PAGE_SIZE);
1074 	phdr.p_vaddr   = phdr.p_paddr = 0;
1075 	phdr.p_filesz  = phdr.p_memsz = phdr_sz;
1076 	phdr.p_align   = 4;
1077 
1078 	/* Add merged PT_NOTE program header */
1079 	tmp = elfptr + sizeof(Elf32_Ehdr);
1080 	memcpy(tmp, &phdr, sizeof(phdr));
1081 	tmp += sizeof(phdr);
1082 
1083 	/* Remove unwanted PT_NOTE program headers. */
1084 	i = (nr_ptnote - 1) * sizeof(Elf32_Phdr);
1085 	*elfsz = *elfsz - i;
1086 	memmove(tmp, tmp + i, *elfsz - sizeof(Elf32_Ehdr) - sizeof(Elf32_Phdr));
1087 	memset(elfptr + *elfsz, 0, i);
1088 	*elfsz = roundup(*elfsz, PAGE_SIZE);
1089 
1090 	/* Modify e_phnum to reflect merged headers. */
1091 	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;
1092 
1093 	/* Store the size of all notes. We need this to update the note
1094 	 * header when device dumps are added.
1095 	 */
1096 	elfnotes_orig_sz = phdr.p_memsz;
1097 
1098 	return 0;
1099 }
1100 
1101 /* Add memory chunks represented by program headers to the vmcore list. Also
1102  * update the offset fields of the exported program headers. */
1103 static int __init process_ptload_program_headers_elf64(char *elfptr,
1104 						size_t elfsz,
1105 						size_t elfnotes_sz,
1106 						struct list_head *vc_list)
1107 {
1108 	int i;
1109 	Elf64_Ehdr *ehdr_ptr;
1110 	Elf64_Phdr *phdr_ptr;
1111 	loff_t vmcore_off;
1112 	struct vmcore *new;
1113 
1114 	ehdr_ptr = (Elf64_Ehdr *)elfptr;
1115 	phdr_ptr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr)); /* PT_NOTE hdr */
1116 
1117 	/* Skip ELF header, program headers and ELF note segment. */
1118 	vmcore_off = elfsz + elfnotes_sz;
1119 
1120 	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
1121 		u64 paddr, start, end, size;
1122 
1123 		if (phdr_ptr->p_type != PT_LOAD)
1124 			continue;
1125 
1126 		paddr = phdr_ptr->p_offset;
1127 		start = rounddown(paddr, PAGE_SIZE);
1128 		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
1129 		size = end - start;
1130 
1131 		/* Add this contiguous chunk of memory to the vmcore list. */
1132 		new = get_new_element();
1133 		if (!new)
1134 			return -ENOMEM;
1135 		new->paddr = start;
1136 		new->size = size;
1137 		list_add_tail(&new->list, vc_list);
1138 
1139 		/* Update the program header offset. */
1140 		phdr_ptr->p_offset = vmcore_off + (paddr - start);
1141 		vmcore_off = vmcore_off + size;
1142 	}
1143 	return 0;
1144 }
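
/*
 * A worked example of the rounding above (made-up numbers, 4 KiB pages):
 * a PT_LOAD entry whose p_offset (the old physical address) is 0x1000200
 * with p_memsz = 0x3000 yields start = 0x1000000, end = 0x1004000 and
 * size = 0x4000. The chunk [0x1000000, 0x1004000) is added to the vmcore
 * list and the header's new p_offset becomes vmcore_off + 0x200.
 */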
1145 
1146 static int __init process_ptload_program_headers_elf32(char *elfptr,
1147 						size_t elfsz,
1148 						size_t elfnotes_sz,
1149 						struct list_head *vc_list)
1150 {
1151 	int i;
1152 	Elf32_Ehdr *ehdr_ptr;
1153 	Elf32_Phdr *phdr_ptr;
1154 	loff_t vmcore_off;
1155 	struct vmcore *new;
1156 
1157 	ehdr_ptr = (Elf32_Ehdr *)elfptr;
1158 	phdr_ptr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr)); /* PT_NOTE hdr */
1159 
1160 	/* Skip ELF header, program headers and ELF note segment. */
1161 	vmcore_off = elfsz + elfnotes_sz;
1162 
1163 	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
1164 		u64 paddr, start, end, size;
1165 
1166 		if (phdr_ptr->p_type != PT_LOAD)
1167 			continue;
1168 
1169 		paddr = phdr_ptr->p_offset;
1170 		start = rounddown(paddr, PAGE_SIZE);
1171 		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
1172 		size = end - start;
1173 
1174 		/* Add this contiguous chunk of memory to the vmcore list. */
1175 		new = get_new_element();
1176 		if (!new)
1177 			return -ENOMEM;
1178 		new->paddr = start;
1179 		new->size = size;
1180 		list_add_tail(&new->list, vc_list);
1181 
1182 		/* Update the program header offset */
1183 		phdr_ptr->p_offset = vmcore_off + (paddr - start);
1184 		vmcore_off = vmcore_off + size;
1185 	}
1186 	return 0;
1187 }
1188 
1189 /* Sets offset fields of vmcore elements. */
1190 static void set_vmcore_list_offsets(size_t elfsz, size_t elfnotes_sz,
1191 				    struct list_head *vc_list)
1192 {
1193 	loff_t vmcore_off;
1194 	struct vmcore *m;
1195 
1196 	/* Skip ELF header, program headers and ELF note segment. */
1197 	vmcore_off = elfsz + elfnotes_sz;
1198 
1199 	list_for_each_entry(m, vc_list, list) {
1200 		m->offset = vmcore_off;
1201 		vmcore_off += m->size;
1202 	}
1203 }
1204 
1205 static void free_elfcorebuf(void)
1206 {
1207 	free_pages((unsigned long)elfcorebuf, get_order(elfcorebuf_sz_orig));
1208 	elfcorebuf = NULL;
1209 	vfree(elfnotes_buf);
1210 	elfnotes_buf = NULL;
1211 }
1212 
1213 static int __init parse_crash_elf64_headers(void)
1214 {
1215 	int rc = 0;
1216 	Elf64_Ehdr ehdr;
1217 	u64 addr;
1218 
1219 	addr = elfcorehdr_addr;
1220 
1221 	/* Read ELF header */
1222 	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf64_Ehdr), &addr);
1223 	if (rc < 0)
1224 		return rc;
1225 
1226 	/* Do some basic verification. */
1227 	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
1228 		(ehdr.e_type != ET_CORE) ||
1229 		!vmcore_elf64_check_arch(&ehdr) ||
1230 		ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
1231 		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
1232 		ehdr.e_version != EV_CURRENT ||
1233 		ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
1234 		ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
1235 		ehdr.e_phnum == 0) {
1236 		pr_warn("Warning: Core image elf header is not sane\n");
1237 		return -EINVAL;
1238 	}
1239 
1240 	/* Read in all elf headers. */
1241 	elfcorebuf_sz_orig = sizeof(Elf64_Ehdr) +
1242 				ehdr.e_phnum * sizeof(Elf64_Phdr);
1243 	elfcorebuf_sz = elfcorebuf_sz_orig;
1244 	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
1245 					      get_order(elfcorebuf_sz_orig));
1246 	if (!elfcorebuf)
1247 		return -ENOMEM;
1248 	addr = elfcorehdr_addr;
1249 	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
1250 	if (rc < 0)
1251 		goto fail;
1252 
1253 	/* Merge all PT_NOTE headers into one. */
1254 	rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz,
1255 				      &elfnotes_buf, &elfnotes_sz);
1256 	if (rc)
1257 		goto fail;
1258 	rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
1259 						  elfnotes_sz, &vmcore_list);
1260 	if (rc)
1261 		goto fail;
1262 	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
1263 	return 0;
1264 fail:
1265 	free_elfcorebuf();
1266 	return rc;
1267 }
1268 
1269 static int __init parse_crash_elf32_headers(void)
1270 {
1271 	int rc = 0;
1272 	Elf32_Ehdr ehdr;
1273 	u64 addr;
1274 
1275 	addr = elfcorehdr_addr;
1276 
1277 	/* Read ELF header */
1278 	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf32_Ehdr), &addr);
1279 	if (rc < 0)
1280 		return rc;
1281 
1282 	/* Do some basic verification. */
1283 	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
1284 		(ehdr.e_type != ET_CORE) ||
1285 		!vmcore_elf32_check_arch(&ehdr) ||
1286 		ehdr.e_ident[EI_CLASS] != ELFCLASS32 ||
1287 		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
1288 		ehdr.e_version != EV_CURRENT ||
1289 		ehdr.e_ehsize != sizeof(Elf32_Ehdr) ||
1290 		ehdr.e_phentsize != sizeof(Elf32_Phdr) ||
1291 		ehdr.e_phnum == 0) {
1292 		pr_warn("Warning: Core image elf header is not sane\n");
1293 		return -EINVAL;
1294 	}
1295 
1296 	/* Read in all elf headers. */
1297 	elfcorebuf_sz_orig = sizeof(Elf32_Ehdr) + ehdr.e_phnum * sizeof(Elf32_Phdr);
1298 	elfcorebuf_sz = elfcorebuf_sz_orig;
1299 	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
1300 					      get_order(elfcorebuf_sz_orig));
1301 	if (!elfcorebuf)
1302 		return -ENOMEM;
1303 	addr = elfcorehdr_addr;
1304 	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
1305 	if (rc < 0)
1306 		goto fail;
1307 
1308 	/* Merge all PT_NOTE headers into one. */
1309 	rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz,
1310 				      &elfnotes_buf, &elfnotes_sz);
1311 	if (rc)
1312 		goto fail;
1313 	rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz,
1314 						  elfnotes_sz, &vmcore_list);
1315 	if (rc)
1316 		goto fail;
1317 	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
1318 	return 0;
1319 fail:
1320 	free_elfcorebuf();
1321 	return rc;
1322 }
1323 
1324 static int __init parse_crash_elf_headers(void)
1325 {
1326 	unsigned char e_ident[EI_NIDENT];
1327 	u64 addr;
1328 	int rc = 0;
1329 
1330 	addr = elfcorehdr_addr;
1331 	rc = elfcorehdr_read(e_ident, EI_NIDENT, &addr);
1332 	if (rc < 0)
1333 		return rc;
1334 	if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) {
1335 		pr_warn("Warning: Core image elf header not found\n");
1336 		return -EINVAL;
1337 	}
1338 
1339 	if (e_ident[EI_CLASS] == ELFCLASS64) {
1340 		rc = parse_crash_elf64_headers();
1341 		if (rc)
1342 			return rc;
1343 	} else if (e_ident[EI_CLASS] == ELFCLASS32) {
1344 		rc = parse_crash_elf32_headers();
1345 		if (rc)
1346 			return rc;
1347 	} else {
1348 		pr_warn("Warning: Core image elf header is not sane\n");
1349 		return -EINVAL;
1350 	}
1351 
1352 	/* Determine vmcore size. */
1353 	vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
1354 				      &vmcore_list);
1355 
1356 	return 0;
1357 }
1358 
1359 #ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
1360 /**
1361  * vmcoredd_write_header - Write vmcore device dump header at the
1362  * beginning of the dump's buffer.
1363  * @buf: Output buffer where the note is written
1364  * @data: Dump info
1365  * @size: Size of the dump
1366  *
1367  * Fills the beginning of the dump's buffer with the vmcore device dump header.
1368  */
1369 static void vmcoredd_write_header(void *buf, struct vmcoredd_data *data,
1370 				  u32 size)
1371 {
1372 	struct vmcoredd_header *vdd_hdr = (struct vmcoredd_header *)buf;
1373 
1374 	vdd_hdr->n_namesz = sizeof(vdd_hdr->name);
1375 	vdd_hdr->n_descsz = size + sizeof(vdd_hdr->dump_name);
1376 	vdd_hdr->n_type = NT_VMCOREDD;
1377 
1378 	strscpy_pad(vdd_hdr->name, VMCOREDD_NOTE_NAME);
1379 	strscpy_pad(vdd_hdr->dump_name, data->dump_name);
1380 }
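
/*
 * The resulting buffer layout, for reference (assuming the definition of
 * struct vmcoredd_header in <uapi/linux/vmcore.h>):
 *
 *	+--------------------------------------+ <-- buf
 *	| n_namesz / n_descsz / n_type         |
 *	| name[]      = VMCOREDD_NOTE_NAME     |
 *	| dump_name[] = data->dump_name        |
 *	+--------------------------------------+ <-- buf + sizeof(*vdd_hdr)
 *	| device dump data (size bytes)        |
 *	+--------------------------------------+
 *
 * Note that n_descsz covers both dump_name and the dump data, so tools
 * parsing the NT_VMCOREDD note see the name and payload as a single
 * descriptor.
 */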
1381 
1382 /**
1383  * vmcoredd_update_program_headers - Update all ELF program headers
1384  * @elfptr: Pointer to elf header
1385  * @elfnotesz: Size of elf notes aligned to page size
1386  * @vmcoreddsz: Size of device dumps to be added to elf note header
1387  *
1388  * Determine the type of ELF header (Elf64 or Elf32) and update the ELF note
1389  * size. Also update the offsets of all program headers after the ELF note header.
1390  */
1391 static void vmcoredd_update_program_headers(char *elfptr, size_t elfnotesz,
1392 					    size_t vmcoreddsz)
1393 {
1394 	unsigned char *e_ident = (unsigned char *)elfptr;
1395 	u64 start, end, size;
1396 	loff_t vmcore_off;
1397 	u32 i;
1398 
1399 	vmcore_off = elfcorebuf_sz + elfnotesz;
1400 
1401 	if (e_ident[EI_CLASS] == ELFCLASS64) {
1402 		Elf64_Ehdr *ehdr = (Elf64_Ehdr *)elfptr;
1403 		Elf64_Phdr *phdr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr));
1404 
1405 		/* Update all program headers */
1406 		for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
1407 			if (phdr->p_type == PT_NOTE) {
1408 				/* Update note size */
1409 				phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz;
1410 				phdr->p_filesz = phdr->p_memsz;
1411 				continue;
1412 			}
1413 
1414 			start = rounddown(phdr->p_offset, PAGE_SIZE);
1415 			end = roundup(phdr->p_offset + phdr->p_memsz,
1416 				      PAGE_SIZE);
1417 			size = end - start;
1418 			phdr->p_offset = vmcore_off + (phdr->p_offset - start);
1419 			vmcore_off += size;
1420 		}
1421 	} else {
1422 		Elf32_Ehdr *ehdr = (Elf32_Ehdr *)elfptr;
1423 		Elf32_Phdr *phdr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr));
1424 
1425 		/* Update all program headers */
1426 		for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
1427 			if (phdr->p_type == PT_NOTE) {
1428 				/* Update note size */
1429 				phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz;
1430 				phdr->p_filesz = phdr->p_memsz;
1431 				continue;
1432 			}
1433 
1434 			start = rounddown(phdr->p_offset, PAGE_SIZE);
1435 			end = roundup(phdr->p_offset + phdr->p_memsz,
1436 				      PAGE_SIZE);
1437 			size = end - start;
1438 			phdr->p_offset = vmcore_off + (phdr->p_offset - start);
1439 			vmcore_off += size;
1440 		}
1441 	}
1442 }
1443 
1444 /**
1445  * vmcoredd_update_size - Update the total size of the device dumps and update
1446  * the ELF header
1447  * @dump_size: Size of the current device dump to be added to total size
1448  *
1449  * Update the total size of all the device dumps and update the ELF program
1450  * headers. Calculate the new offsets for the vmcore list and update the
1451  * total vmcore size.
1452  */
1453 static void vmcoredd_update_size(size_t dump_size)
1454 {
1455 	vmcoredd_orig_sz += dump_size;
1456 	elfnotes_sz = roundup(elfnotes_orig_sz, PAGE_SIZE) + vmcoredd_orig_sz;
1457 	vmcoredd_update_program_headers(elfcorebuf, elfnotes_sz,
1458 					vmcoredd_orig_sz);
1459 
1460 	/* Update vmcore list offsets */
1461 	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
1462 
1463 	vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
1464 				      &vmcore_list);
1465 	proc_vmcore->size = vmcore_size;
1466 }
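
/*
 * A worked example (made-up numbers, 4 KiB pages): with elfnotes_orig_sz
 * = 0x1500 and a first device dump of 0x2000 bytes, vmcoredd_orig_sz
 * becomes 0x2000 and elfnotes_sz becomes roundup(0x1500, 0x1000) + 0x2000
 * = 0x4000, shifting every PT_LOAD offset and vmcore list entry up
 * accordingly.
 */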
1467 
1468 /**
1469  * vmcore_add_device_dump - Add a buffer containing device dump to vmcore
1470  * @data: dump info.
1471  *
1472  * Allocate a buffer and invoke the calling driver's dump collect routine.
1473  * Write an ELF note at the beginning of the buffer to indicate a vmcore
1474  * device dump and add the dump to the global list.
1475  */
1476 int vmcore_add_device_dump(struct vmcoredd_data *data)
1477 {
1478 	struct vmcoredd_node *dump;
1479 	void *buf = NULL;
1480 	size_t data_size;
1481 	int ret;
1482 
1483 	if (vmcoredd_disabled) {
1484 		pr_err_once("Device dump is disabled\n");
1485 		return -EINVAL;
1486 	}
1487 
1488 	if (!data || !strlen(data->dump_name) ||
1489 	    !data->vmcoredd_callback || !data->size)
1490 		return -EINVAL;
1491 
1492 	dump = vzalloc(sizeof(*dump));
1493 	if (!dump) {
1494 		ret = -ENOMEM;
1495 		goto out_err;
1496 	}
1497 
1498 	/* Keep the size of the buffer page-aligned so that it can be mmapped */
1499 	data_size = roundup(sizeof(struct vmcoredd_header) + data->size,
1500 			    PAGE_SIZE);
1501 
1502 	/* Allocate a buffer for drivers to write their dumps into */
1503 	buf = vmcore_alloc_buf(data_size);
1504 	if (!buf) {
1505 		ret = -ENOMEM;
1506 		goto out_err;
1507 	}
1508 
1509 	vmcoredd_write_header(buf, data, data_size -
1510 			      sizeof(struct vmcoredd_header));
1511 
1512 	/* Invoke the driver's dump collection routine */
1513 	ret = data->vmcoredd_callback(data, buf +
1514 				      sizeof(struct vmcoredd_header));
1515 	if (ret)
1516 		goto out_err;
1517 
1518 	dump->buf = buf;
1519 	dump->size = data_size;
1520 
1521 	/* Add the dump to the device dump list */
1522 	mutex_lock(&vmcoredd_mutex);
1523 	list_add_tail(&dump->list, &vmcoredd_list);
1524 	mutex_unlock(&vmcoredd_mutex);
1525 
1526 	vmcoredd_update_size(data_size);
1527 	return 0;
1528 
1529 out_err:
1530 	vfree(buf);
1531 	vfree(dump);
1532 
1533 	return ret;
1534 }
1535 EXPORT_SYMBOL(vmcore_add_device_dump);
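
/*
 * Example caller (an illustrative sketch: the my_dev_* names and the size
 * constant are hypothetical, while struct vmcoredd_data is the real
 * interface declared in <linux/crash_dump.h>):
 *
 *	static int my_dev_collect(struct vmcoredd_data *data, void *buf)
 *	{
 *		return my_dev_read_fw_state(buf, data->size);
 *	}
 *
 *	static struct vmcoredd_data my_dev_dump = {
 *		.dump_name = "my_dev",
 *		.size = MY_DEV_DUMP_SIZE,
 *		.vmcoredd_callback = my_dev_collect,
 *	};
 *
 *	err = vmcore_add_device_dump(&my_dev_dump);
 *
 * The callback is invoked once, synchronously, from within
 * vmcore_add_device_dump(), with buf sized to hold data->size bytes.
 */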
1536 #endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
1537 
1538 /* Free all dumps in vmcore device dump list */
1539 static void vmcore_free_device_dumps(void)
1540 {
1541 #ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
1542 	mutex_lock(&vmcoredd_mutex);
1543 	while (!list_empty(&vmcoredd_list)) {
1544 		struct vmcoredd_node *dump;
1545 
1546 		dump = list_first_entry(&vmcoredd_list, struct vmcoredd_node,
1547 					list);
1548 		list_del(&dump->list);
1549 		vfree(dump->buf);
1550 		vfree(dump);
1551 	}
1552 	mutex_unlock(&vmcoredd_mutex);
1553 #endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
1554 }
1555 
1556 /* Init function for vmcore module. */
1557 static int __init vmcore_init(void)
1558 {
1559 	int rc = 0;
1560 
1561 	/* Allow architectures to allocate the ELF header in the 2nd kernel */
1562 	rc = elfcorehdr_alloc(&elfcorehdr_addr, &elfcorehdr_size);
1563 	if (rc)
1564 		return rc;
1565 	/*
1566 	 * If elfcorehdr= has been passed on the cmdline or created in the 2nd kernel,
1567 	 * then capture the dump.
1568 	 */
1569 	if (!(is_vmcore_usable()))
1570 		return rc;
1571 	rc = parse_crash_elf_headers();
1572 	if (rc) {
1573 		elfcorehdr_free(elfcorehdr_addr);
1574 		pr_warn("Kdump: vmcore not initialized\n");
1575 		return rc;
1576 	}
1577 	elfcorehdr_free(elfcorehdr_addr);
1578 	elfcorehdr_addr = ELFCORE_ADDR_ERR;
1579 
1580 	proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &vmcore_proc_ops);
1581 	if (proc_vmcore)
1582 		proc_vmcore->size = vmcore_size;
1583 	return 0;
1584 }
1585 fs_initcall(vmcore_init);
1586 
1587 /* Cleanup function for vmcore module. */
1588 void vmcore_cleanup(void)
1589 {
1590 	if (proc_vmcore) {
1591 		proc_remove(proc_vmcore);
1592 		proc_vmcore = NULL;
1593 	}
1594 
1595 	/* clear the vmcore list. */
1596 	while (!list_empty(&vmcore_list)) {
1597 		struct vmcore *m;
1598 
1599 		m = list_first_entry(&vmcore_list, struct vmcore, list);
1600 		list_del(&m->list);
1601 		kfree(m);
1602 	}
1603 	free_elfcorebuf();
1604 
1605 	/* clear vmcore device dump list */
1606 	vmcore_free_device_dumps();
1607 }
1608