/*
 *	fs/proc/vmcore.c Interface for accessing the crash
 *				 dump from the system's previous life.
 *	Heavily borrowed from fs/proc/kcore.c
 *	Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *	Copyright (C) IBM Corporation, 2004. All rights reserved
 *
 */

#include <linux/mm.h>
#include <linux/kcore.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/printk.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/crash_dump.h>
#include <linux/list.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include "internal.h"

/*
 * List representing chunks of contiguous memory areas and their offsets in
 * the vmcore file.
 */
static LIST_HEAD(vmcore_list);

/* Stores the pointer to the buffer containing kernel ELF core headers. */
static char *elfcorebuf;
static size_t elfcorebuf_sz;
static size_t elfcorebuf_sz_orig;

static char *elfnotes_buf;
static size_t elfnotes_sz;

/* Total size of vmcore file. */
static u64 vmcore_size;

static struct proc_dir_entry *proc_vmcore = NULL;

/*
 * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error.
 * The registered callback has to take care of module refcounting.
 */
static int (*oldmem_pfn_is_ram)(unsigned long pfn);

int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn))
{
	if (oldmem_pfn_is_ram)
		return -EBUSY;
	oldmem_pfn_is_ram = fn;
	return 0;
}
EXPORT_SYMBOL_GPL(register_oldmem_pfn_is_ram);

void unregister_oldmem_pfn_is_ram(void)
{
	oldmem_pfn_is_ram = NULL;
	wmb();
}
EXPORT_SYMBOL_GPL(unregister_oldmem_pfn_is_ram);

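/*
 * Example (a hypothetical sketch, for illustration only): a hypervisor
 * balloon backend could register a callback so that ballooned-out pages
 * are skipped while the dump is read. The names below are invented:
 *
 *	static int example_oldmem_pfn_is_ram(unsigned long pfn)
 *	{
 *		return example_query_hypervisor(pfn); (1 = RAM, 0 = not RAM)
 *	}
 *
 *	register_oldmem_pfn_is_ram(&example_oldmem_pfn_is_ram);
 *
 * In mainline, the Xen balloon code registers such a callback for this
 * purpose.
 */
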
static int pfn_is_ram(unsigned long pfn)
{
	int (*fn)(unsigned long pfn);
	/* pfn is ram unless fn() checks pagetype */
	int ret = 1;

	/*
	 * Ask hypervisor if the pfn is really ram.
	 * A ballooned page contains no data and reading from such a page
	 * will cause high load in the hypervisor.
	 */
	fn = oldmem_pfn_is_ram;
	if (fn)
		ret = fn(pfn);

	return ret;
}

/* Reads from the oldmem device, a page at a time, from the given offset. */
static ssize_t read_from_oldmem(char *buf, size_t count,
				u64 *ppos, int userbuf)
{
	unsigned long pfn, offset;
	size_t nr_bytes;
	ssize_t read = 0, tmp;

	if (!count)
		return 0;

	offset = (unsigned long)(*ppos % PAGE_SIZE);
	pfn = (unsigned long)(*ppos / PAGE_SIZE);

	do {
		if (count > (PAGE_SIZE - offset))
			nr_bytes = PAGE_SIZE - offset;
		else
			nr_bytes = count;

		/* If pfn is not ram, return zeros for sparse dump files */
		if (pfn_is_ram(pfn) == 0)
			memset(buf, 0, nr_bytes);
		else {
			tmp = copy_oldmem_page(pfn, buf, nr_bytes,
						offset, userbuf);
			if (tmp < 0)
				return tmp;
		}
		*ppos += nr_bytes;
		count -= nr_bytes;
		buf += nr_bytes;
		read += nr_bytes;
		++pfn;
		offset = 0;
	} while (count);

	return read;
}

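/*
 * Offset arithmetic above, worked example (assuming a 4 KiB PAGE_SIZE):
 * for *ppos == 0x12345, pfn = 0x12345 / 0x1000 = 0x12 and
 * offset = 0x12345 % 0x1000 = 0x345, so the first iteration copies at
 * most PAGE_SIZE - 0x345 = 0xcbb bytes from page 0x12; later iterations
 * continue from offset 0 of the following pages.
 */
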
/*
 * Architectures may override this function to allocate the ELF header in
 * the 2nd kernel.
 */
int __weak elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
{
	return 0;
}

/*
 * Architectures may override this function to free the ELF header.
 */
void __weak elfcorehdr_free(unsigned long long addr)
{}

/*
 * Architectures may override this function to read from the ELF header.
 */
ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
{
	return read_from_oldmem(buf, count, ppos, 0);
}

/*
 * Architectures may override this function to read from notes sections.
 */
ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
{
	return read_from_oldmem(buf, count, ppos, 0);
}

/*
 * Architectures may override this function to map oldmem.
 */
int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
				  unsigned long from, unsigned long pfn,
				  unsigned long size, pgprot_t prot)
{
	return remap_pfn_range(vma, from, pfn, size, prot);
}

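/*
 * The weak hooks above let an architecture interpose on every access to
 * old kernel memory. A minimal sketch of an override, loosely modelled
 * on s390 (where the ELF header is built in the 2nd kernel rather than
 * read from oldmem), might look like this; illustrative only:
 *
 *	ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos)
 *	{
 *		void *src = (void *)(unsigned long)*ppos;
 *
 *		memcpy(buf, src, count);  (header lives in this kernel)
 *		*ppos += count;
 *		return count;
 *	}
 *
 * The real s390 implementations live in arch/s390/kernel/crash_dump.c.
 */
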
/*
 * Copy to either kernel or user space
 */
static int copy_to(void *target, void *src, size_t size, int userbuf)
{
	if (userbuf) {
		if (copy_to_user((char __user *) target, src, size))
			return -EFAULT;
	} else {
		memcpy(target, src, size);
	}
	return 0;
}

/* Read from the ELF header and then the crash dump. Returns the number of
 * bytes read on success, or a negative value on error.
 */
static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
			     int userbuf)
{
	ssize_t acc = 0, tmp;
	size_t tsz;
	u64 start;
	struct vmcore *m = NULL;

	if (buflen == 0 || *fpos >= vmcore_size)
		return 0;

	/* trim buflen to not go beyond EOF */
	if (buflen > vmcore_size - *fpos)
		buflen = vmcore_size - *fpos;

	/* Read ELF core header */
	if (*fpos < elfcorebuf_sz) {
		tsz = min(elfcorebuf_sz - (size_t)*fpos, buflen);
		if (copy_to(buffer, elfcorebuf + *fpos, tsz, userbuf))
			return -EFAULT;
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (buflen == 0)
			return acc;
	}

	/* Read ELF note segment */
	if (*fpos < elfcorebuf_sz + elfnotes_sz) {
		void *kaddr;

		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)*fpos, buflen);
		kaddr = elfnotes_buf + *fpos - elfcorebuf_sz;
		if (copy_to(buffer, kaddr, tsz, userbuf))
			return -EFAULT;
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (buflen == 0)
			return acc;
	}

	list_for_each_entry(m, &vmcore_list, list) {
		if (*fpos < m->offset + m->size) {
			tsz = min_t(size_t, m->offset + m->size - *fpos, buflen);
			start = m->paddr + *fpos - m->offset;
			tmp = read_from_oldmem(buffer, tsz, &start, userbuf);
			if (tmp < 0)
				return tmp;
			buflen -= tsz;
			*fpos += tsz;
			buffer += tsz;
			acc += tsz;

			/* leave now if filled buffer already */
			if (buflen == 0)
				return acc;
		}
	}

	return acc;
}

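/*
 * File layout handled by __read_vmcore(), for reference:
 *
 *	offset 0                           merged ELF headers (elfcorebuf,
 *	                                   elfcorebuf_sz bytes, page aligned)
 *	offset elfcorebuf_sz               ELF note segment (elfnotes_buf,
 *	                                   elfnotes_sz bytes, page aligned)
 *	offset elfcorebuf_sz + elfnotes_sz memory chunks from vmcore_list,
 *	                                   one after another in list order
 */
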
static ssize_t read_vmcore(struct file *file, char __user *buffer,
			   size_t buflen, loff_t *fpos)
{
	return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
}

/*
 * The vmcore fault handler uses the page cache and fills data using the
 * standard __read_vmcore() function.
 *
 * On s390 the fault handler is used for memory regions that can't be mapped
 * directly with remap_pfn_range().
 */
static int mmap_vmcore_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
#ifdef CONFIG_S390
	struct address_space *mapping = vma->vm_file->f_mapping;
	pgoff_t index = vmf->pgoff;
	struct page *page;
	loff_t offset;
	char *buf;
	int rc;

	page = find_or_create_page(mapping, index, GFP_KERNEL);
	if (!page)
		return VM_FAULT_OOM;
	if (!PageUptodate(page)) {
		offset = (loff_t) index << PAGE_CACHE_SHIFT;
		buf = __va((page_to_pfn(page) << PAGE_SHIFT));
		rc = __read_vmcore(buf, PAGE_SIZE, &offset, 0);
		if (rc < 0) {
			unlock_page(page);
			page_cache_release(page);
			return (rc == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
		}
		SetPageUptodate(page);
	}
	unlock_page(page);
	vmf->page = page;
	return 0;
#else
	return VM_FAULT_SIGBUS;
#endif
}

static const struct vm_operations_struct vmcore_mmap_ops = {
	.fault = mmap_vmcore_fault,
};

/**
 * alloc_elfnotes_buf - allocate buffer for ELF note segment in
 *                      vmalloc memory
 *
 * @notes_sz: size of buffer
 *
 * If CONFIG_MMU is defined, use vmalloc_user() to allow users to mmap
 * the buffer to user-space by means of remap_vmalloc_range().
 *
 * If CONFIG_MMU is not defined, use vzalloc() since mmap_vmcore() is
 * disabled and there's no need to allow users to mmap the buffer.
 */
static inline char *alloc_elfnotes_buf(size_t notes_sz)
{
#ifdef CONFIG_MMU
	return vmalloc_user(notes_sz);
#else
	return vzalloc(notes_sz);
#endif
}

/*
 * Disable mmap_vmcore() if CONFIG_MMU is not defined. MMU is
 * essential for mmap_vmcore() in order to map physically
 * non-contiguous objects (ELF header, ELF note segment and memory
 * regions in the 1st kernel pointed to by PT_LOAD entries) into
 * virtually contiguous user-space in ELF layout.
 */
#ifdef CONFIG_MMU
static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;
	u64 start, end, len, tsz;
	struct vmcore *m;

	start = (u64)vma->vm_pgoff << PAGE_SHIFT;
	end = start + size;

	if (size > vmcore_size || end > vmcore_size)
		return -EINVAL;

	if (vma->vm_flags & (VM_WRITE | VM_EXEC))
		return -EPERM;

	vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC);
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_ops = &vmcore_mmap_ops;

	len = 0;

	if (start < elfcorebuf_sz) {
		u64 pfn;

		tsz = min(elfcorebuf_sz - (size_t)start, size);
		pfn = __pa(elfcorebuf + start) >> PAGE_SHIFT;
		if (remap_pfn_range(vma, vma->vm_start, pfn, tsz,
				    vma->vm_page_prot))
			return -EAGAIN;
		size -= tsz;
		start += tsz;
		len += tsz;

		if (size == 0)
			return 0;
	}

	if (start < elfcorebuf_sz + elfnotes_sz) {
		void *kaddr;

		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)start, size);
		kaddr = elfnotes_buf + start - elfcorebuf_sz;
		if (remap_vmalloc_range_partial(vma, vma->vm_start + len,
						kaddr, tsz))
			goto fail;
		size -= tsz;
		start += tsz;
		len += tsz;

		if (size == 0)
			return 0;
	}

	list_for_each_entry(m, &vmcore_list, list) {
		if (start < m->offset + m->size) {
			u64 paddr = 0;

			tsz = min_t(size_t, m->offset + m->size - start, size);
			paddr = m->paddr + start - m->offset;
			if (remap_oldmem_pfn_range(vma, vma->vm_start + len,
						   paddr >> PAGE_SHIFT, tsz,
						   vma->vm_page_prot))
				goto fail;
			size -= tsz;
			start += tsz;
			len += tsz;

			if (size == 0)
				return 0;
		}
	}

	return 0;
fail:
	do_munmap(vma->vm_mm, vma->vm_start, len);
	return -EAGAIN;
}
#else
static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
	return -ENOSYS;
}
#endif

static const struct file_operations proc_vmcore_operations = {
	.read		= read_vmcore,
	.llseek		= default_llseek,
	.mmap		= mmap_vmcore,
};

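/*
 * Typical consumers of this interface are userspace dump tools. A minimal
 * sketch of how one might read the dump (hypothetical userspace code, not
 * part of this file; error handling omitted):
 *
 *	int fd = open("/proc/vmcore", O_RDONLY);
 *	ssize_t n = read(fd, buf, sizeof(buf));  (ELF headers come first)
 *	void *p = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
 *
 * Tools such as makedumpfile prefer the mmap path when it is available,
 * since it avoids a copy per page.
 */
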
static struct vmcore * __init get_new_element(void)
{
	return kzalloc(sizeof(struct vmcore), GFP_KERNEL);
}

static u64 __init get_vmcore_size(size_t elfsz, size_t elfnotesegsz,
				  struct list_head *vc_list)
{
	u64 size;
	struct vmcore *m;

	size = elfsz + elfnotesegsz;
	list_for_each_entry(m, vc_list, list) {
		size += m->size;
	}
	return size;
}

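/*
 * Put differently, the exported file size is simply:
 *
 *	vmcore_size = elfcorebuf_sz + elfnotes_sz
 *		      + (sum of m->size over all chunks on vc_list)
 */
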
/**
 * update_note_header_size_elf64 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates the p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to the real size of the
 * ELF note segment.
 */
static int __init update_note_header_size_elf64(const Elf64_Ehdr *ehdr_ptr)
{
	int i, rc = 0;
	Elf64_Phdr *phdr_ptr;
	Elf64_Nhdr *nhdr_ptr;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		void *notes_section;
		u64 offset, max_sz, sz, real_sz = 0;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		while (nhdr_ptr->n_namesz != 0) {
			sz = sizeof(Elf64_Nhdr) +
				((nhdr_ptr->n_namesz + 3) & ~3) +
				((nhdr_ptr->n_descsz + 3) & ~3);
			if ((real_sz + sz) > max_sz) {
				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
				break;
			}
			real_sz += sz;
			nhdr_ptr = (Elf64_Nhdr *)((char *)nhdr_ptr + sz);
		}
		kfree(notes_section);
		phdr_ptr->p_memsz = real_sz;
		if (real_sz == 0) {
			pr_warn("Warning: Zero PT_NOTE entries found\n");
		}
	}

	return 0;
}

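/*
 * Note size arithmetic above, worked example: an x86_64 NT_PRSTATUS note
 * named "CORE" has n_namesz == 5 and (assuming the usual 336-byte
 * struct elf_prstatus) n_descsz == 336, so
 *
 *	sz = sizeof(Elf64_Nhdr) + roundup(5, 4) + roundup(336, 4)
 *	   = 12 + 8 + 336 = 356 bytes
 *
 * which is what gets accumulated into real_sz.
 */
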
/**
 * get_note_number_and_size_elf64 - get the number of PT_NOTE program
 * headers and sum of real size of their ELF note segment headers and
 * data.
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a unique single one. The resulting unique entry will have
 * @sz_ptnote in its phdr->p_memsz.
 *
 * It is assumed that program headers with PT_NOTE type pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf64
 * and each of the PT_NOTE program headers has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init get_note_number_and_size_elf64(const Elf64_Ehdr *ehdr_ptr,
						 int *nr_ptnote, u64 *sz_ptnote)
{
	int i;
	Elf64_Phdr *phdr_ptr;

	*nr_ptnote = *sz_ptnote = 0;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		*nr_ptnote += 1;
		*sz_ptnote += phdr_ptr->p_memsz;
	}

	return 0;
}

/**
 * copy_notes_elf64 - copy ELF note segments into a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function is used to copy the ELF note segments in the 1st kernel
 * into the buffer @notes_buf in the 2nd kernel. It is assumed that the
 * size of the buffer @notes_buf is equal to or larger than the sum of the
 * real ELF note segment headers and data.
 *
 * It is assumed that program headers with PT_NOTE type pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf64
 * and each of the PT_NOTE program headers has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init copy_notes_elf64(const Elf64_Ehdr *ehdr_ptr, char *notes_buf)
{
	int i, rc = 0;
	Elf64_Phdr *phdr_ptr;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 offset;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		offset = phdr_ptr->p_offset;
		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
					   &offset);
		if (rc < 0)
			return rc;
		notes_buf += phdr_ptr->p_memsz;
	}

	return 0;
}

/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz,
					   char **notes_buf, size_t *notes_sz)
{
	int i, nr_ptnote = 0, rc = 0;
	char *tmp;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr phdr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;

	rc = update_note_header_size_elf64(ehdr_ptr);
	if (rc < 0)
		return rc;

	rc = get_note_number_and_size_elf64(ehdr_ptr, &nr_ptnote, &phdr_sz);
	if (rc < 0)
		return rc;

	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
	*notes_buf = alloc_elfnotes_buf(*notes_sz);
	if (!*notes_buf)
		return -ENOMEM;

	rc = copy_notes_elf64(ehdr_ptr, *notes_buf);
	if (rc < 0)
		return rc;

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type    = PT_NOTE;
	phdr.p_flags   = 0;
	note_off = sizeof(Elf64_Ehdr) +
			(ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf64_Phdr);
	phdr.p_offset  = roundup(note_off, PAGE_SIZE);
	phdr.p_vaddr   = phdr.p_paddr = 0;
	phdr.p_filesz  = phdr.p_memsz = phdr_sz;
	phdr.p_align   = 0;

	/* Add merged PT_NOTE program header. */
	tmp = elfptr + sizeof(Elf64_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf64_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp + i, ((*elfsz) - sizeof(Elf64_Ehdr) - sizeof(Elf64_Phdr)));
	memset(elfptr + *elfsz, 0, i);
	*elfsz = roundup(*elfsz, PAGE_SIZE);

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	return 0;
}

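/*
 * Merge example: a 2-CPU crash typically exports one PT_NOTE per CPU plus
 * one for VMCOREINFO, i.e. nr_ptnote == 3. After the merge there is a
 * single page-aligned PT_NOTE whose p_memsz is the sum of the three real
 * note sizes, and e_phnum has shrunk by nr_ptnote - 1 == 2.
 */
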
/**
 * update_note_header_size_elf32 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates the p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to the real size of the
 * ELF note segment.
 */
static int __init update_note_header_size_elf32(const Elf32_Ehdr *ehdr_ptr)
{
	int i, rc = 0;
	Elf32_Phdr *phdr_ptr;
	Elf32_Nhdr *nhdr_ptr;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		void *notes_section;
		u64 offset, max_sz, sz, real_sz = 0;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		while (nhdr_ptr->n_namesz != 0) {
			sz = sizeof(Elf32_Nhdr) +
				((nhdr_ptr->n_namesz + 3) & ~3) +
				((nhdr_ptr->n_descsz + 3) & ~3);
			if ((real_sz + sz) > max_sz) {
				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
				break;
			}
			real_sz += sz;
			nhdr_ptr = (Elf32_Nhdr *)((char *)nhdr_ptr + sz);
		}
		kfree(notes_section);
		phdr_ptr->p_memsz = real_sz;
		if (real_sz == 0) {
			pr_warn("Warning: Zero PT_NOTE entries found\n");
		}
	}

	return 0;
}

/**
 * get_note_number_and_size_elf32 - get the number of PT_NOTE program
 * headers and sum of real size of their ELF note segment headers and
 * data.
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a unique single one. The resulting unique entry will have
 * @sz_ptnote in its phdr->p_memsz.
 *
 * It is assumed that program headers with PT_NOTE type pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf32
 * and each of the PT_NOTE program headers has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init get_note_number_and_size_elf32(const Elf32_Ehdr *ehdr_ptr,
						 int *nr_ptnote, u64 *sz_ptnote)
{
	int i;
	Elf32_Phdr *phdr_ptr;

	*nr_ptnote = *sz_ptnote = 0;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		*nr_ptnote += 1;
		*sz_ptnote += phdr_ptr->p_memsz;
	}

	return 0;
}

/**
 * copy_notes_elf32 - copy ELF note segments into a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function is used to copy the ELF note segments in the 1st kernel
 * into the buffer @notes_buf in the 2nd kernel. It is assumed that the
 * size of the buffer @notes_buf is equal to or larger than the sum of the
 * real ELF note segment headers and data.
 *
 * It is assumed that program headers with PT_NOTE type pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf32
 * and each of the PT_NOTE program headers has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init copy_notes_elf32(const Elf32_Ehdr *ehdr_ptr, char *notes_buf)
{
	int i, rc = 0;
	Elf32_Phdr *phdr_ptr;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 offset;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		offset = phdr_ptr->p_offset;
		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
					   &offset);
		if (rc < 0)
			return rc;
		notes_buf += phdr_ptr->p_memsz;
	}

	return 0;
}

/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz,
					   char **notes_buf, size_t *notes_sz)
{
	int i, nr_ptnote = 0, rc = 0;
	char *tmp;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr phdr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;

	rc = update_note_header_size_elf32(ehdr_ptr);
	if (rc < 0)
		return rc;

	rc = get_note_number_and_size_elf32(ehdr_ptr, &nr_ptnote, &phdr_sz);
	if (rc < 0)
		return rc;

	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
	*notes_buf = alloc_elfnotes_buf(*notes_sz);
	if (!*notes_buf)
		return -ENOMEM;

	rc = copy_notes_elf32(ehdr_ptr, *notes_buf);
	if (rc < 0)
		return rc;

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type    = PT_NOTE;
	phdr.p_flags   = 0;
	note_off = sizeof(Elf32_Ehdr) +
			(ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf32_Phdr);
	phdr.p_offset  = roundup(note_off, PAGE_SIZE);
	phdr.p_vaddr   = phdr.p_paddr = 0;
	phdr.p_filesz  = phdr.p_memsz = phdr_sz;
	phdr.p_align   = 0;

	/* Add merged PT_NOTE program header. */
	tmp = elfptr + sizeof(Elf32_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf32_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp + i, ((*elfsz) - sizeof(Elf32_Ehdr) - sizeof(Elf32_Phdr)));
	memset(elfptr + *elfsz, 0, i);
	*elfsz = roundup(*elfsz, PAGE_SIZE);

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	return 0;
}

/*
 * Add memory chunks represented by program headers to the vmcore list. Also
 * update the new offset fields of exported program headers.
 */
static int __init process_ptload_program_headers_elf64(char *elfptr,
						size_t elfsz,
						size_t elfnotes_sz,
						struct list_head *vc_list)
{
	int i;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;
	phdr_ptr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr)); /* PT_NOTE hdr */

	/* Skip ELF header, program headers and ELF note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 paddr, start, end, size;

		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		paddr = phdr_ptr->p_offset;
		start = rounddown(paddr, PAGE_SIZE);
		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
		size = end - start;

		/* Add this contiguous chunk of memory to the vmcore list. */
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = start;
		new->size = size;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset. */
		phdr_ptr->p_offset = vmcore_off + (paddr - start);
		vmcore_off = vmcore_off + size;
	}
	return 0;
}

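/*
 * Offset rewrite example: a PT_LOAD with p_offset (old paddr) 0x100500 and
 * p_memsz 0x1000 covers [0x100000, 0x102000) after page rounding, so a
 * chunk of size 0x2000 at paddr 0x100000 is queued and the header's
 * p_offset becomes vmcore_off + 0x500, preserving the in-page offset.
 */
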
static int __init process_ptload_program_headers_elf32(char *elfptr,
						size_t elfsz,
						size_t elfnotes_sz,
						struct list_head *vc_list)
{
	int i;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;
	phdr_ptr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr)); /* PT_NOTE hdr */

	/* Skip ELF header, program headers and ELF note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 paddr, start, end, size;

		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		paddr = phdr_ptr->p_offset;
		start = rounddown(paddr, PAGE_SIZE);
		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
		size = end - start;

		/* Add this contiguous chunk of memory to the vmcore list. */
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = start;
		new->size = size;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset. */
		phdr_ptr->p_offset = vmcore_off + (paddr - start);
		vmcore_off = vmcore_off + size;
	}
	return 0;
}

/* Sets offset fields of vmcore elements. */
static void __init set_vmcore_list_offsets(size_t elfsz, size_t elfnotes_sz,
					   struct list_head *vc_list)
{
	loff_t vmcore_off;
	struct vmcore *m;

	/* Skip ELF header, program headers and ELF note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	list_for_each_entry(m, vc_list, list) {
		m->offset = vmcore_off;
		vmcore_off += m->size;
	}
}

static void free_elfcorebuf(void)
{
	free_pages((unsigned long)elfcorebuf, get_order(elfcorebuf_sz_orig));
	elfcorebuf = NULL;
	vfree(elfnotes_buf);
	elfnotes_buf = NULL;
}

static int __init parse_crash_elf64_headers(void)
{
	int rc = 0;
	Elf64_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read ELF header */
	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf64_Ehdr), &addr);
	if (rc < 0)
		return rc;

	/* Do some basic verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
		(ehdr.e_type != ET_CORE) ||
		!vmcore_elf64_check_arch(&ehdr) ||
		ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
		ehdr.e_version != EV_CURRENT ||
		ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
		ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
		ehdr.e_phnum == 0) {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Read in all ELF headers. */
	elfcorebuf_sz_orig = sizeof(Elf64_Ehdr) +
				ehdr.e_phnum * sizeof(Elf64_Phdr);
	elfcorebuf_sz = elfcorebuf_sz_orig;
	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					      get_order(elfcorebuf_sz_orig));
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
	if (rc < 0)
		goto fail;

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz,
				      &elfnotes_buf, &elfnotes_sz);
	if (rc)
		goto fail;
	rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
						  elfnotes_sz, &vmcore_list);
	if (rc)
		goto fail;
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
	return 0;
fail:
	free_elfcorebuf();
	return rc;
}

static int __init parse_crash_elf32_headers(void)
{
	int rc = 0;
	Elf32_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read ELF header */
	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf32_Ehdr), &addr);
	if (rc < 0)
		return rc;

	/* Do some basic verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
		(ehdr.e_type != ET_CORE) ||
		!elf_check_arch(&ehdr) ||
		ehdr.e_ident[EI_CLASS] != ELFCLASS32 ||
		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
		ehdr.e_version != EV_CURRENT ||
		ehdr.e_ehsize != sizeof(Elf32_Ehdr) ||
		ehdr.e_phentsize != sizeof(Elf32_Phdr) ||
		ehdr.e_phnum == 0) {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Read in all ELF headers. */
	elfcorebuf_sz_orig = sizeof(Elf32_Ehdr) + ehdr.e_phnum * sizeof(Elf32_Phdr);
	elfcorebuf_sz = elfcorebuf_sz_orig;
	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					      get_order(elfcorebuf_sz_orig));
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
	if (rc < 0)
		goto fail;

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz,
				      &elfnotes_buf, &elfnotes_sz);
	if (rc)
		goto fail;
	rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz,
						  elfnotes_sz, &vmcore_list);
	if (rc)
		goto fail;
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
	return 0;
fail:
	free_elfcorebuf();
	return rc;
}

static int __init parse_crash_elf_headers(void)
{
	unsigned char e_ident[EI_NIDENT];
	u64 addr;
	int rc = 0;

	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(e_ident, EI_NIDENT, &addr);
	if (rc < 0)
		return rc;
	if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) {
		pr_warn("Warning: Core image elf header not found\n");
		return -EINVAL;
	}

	if (e_ident[EI_CLASS] == ELFCLASS64) {
		rc = parse_crash_elf64_headers();
		if (rc)
			return rc;
	} else if (e_ident[EI_CLASS] == ELFCLASS32) {
		rc = parse_crash_elf32_headers();
		if (rc)
			return rc;
	} else {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Determine vmcore size. */
	vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
				      &vmcore_list);

	return 0;
}

/* Init function for vmcore module. */
static int __init vmcore_init(void)
{
	int rc = 0;

	/* Allow architectures to allocate ELF header in 2nd kernel */
	rc = elfcorehdr_alloc(&elfcorehdr_addr, &elfcorehdr_size);
	if (rc)
		return rc;
	/*
	 * If elfcorehdr= has been passed in cmdline or created in 2nd kernel,
	 * then capture the dump.
	 */
	if (!(is_vmcore_usable()))
		return rc;
	rc = parse_crash_elf_headers();
	if (rc) {
		pr_warn("Kdump: vmcore not initialized\n");
		return rc;
	}
	elfcorehdr_free(elfcorehdr_addr);
	elfcorehdr_addr = ELFCORE_ADDR_ERR;

	proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &proc_vmcore_operations);
	if (proc_vmcore)
		proc_vmcore->size = vmcore_size;
	return 0;
}
fs_initcall(vmcore_init);

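/*
 * Boot flow, for reference: the capture kernel is either started with an
 * elfcorehdr=<addr> command line option (set up by kexec-tools when the
 * crash kernel is loaded) or, on architectures that implement
 * elfcorehdr_alloc(), builds the header itself. Only when
 * is_vmcore_usable() confirms a valid elfcorehdr_addr does vmcore_init()
 * go on to parse the headers and create /proc/vmcore.
 */
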
/* Cleanup function for vmcore module. */
void vmcore_cleanup(void)
{
	struct list_head *pos, *next;

	if (proc_vmcore) {
		proc_remove(proc_vmcore);
		proc_vmcore = NULL;
	}

	/* clear the vmcore list. */
	list_for_each_safe(pos, next, &vmcore_list) {
		struct vmcore *m;

		m = list_entry(pos, struct vmcore, list);
		list_del(&m->list);
		kfree(m);
	}
	free_elfcorebuf();
}