/*
 *	fs/proc/vmcore.c Interface for accessing the crash
 *	dump from the system's previous life.
 *	Heavily borrowed from fs/proc/kcore.c
 *	Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *	Copyright (C) IBM Corporation, 2004. All rights reserved
 *
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/crash_dump.h>
#include <linux/list.h>
#include <asm/uaccess.h>
#include <asm/io.h>
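
/*
 * How this file fits together: at boot, vmcore_init() reads the crash
 * image's ELF headers from old memory (their physical address arrives via
 * the elfcorehdr= parameter), merges the PT_NOTE headers into one, rewrites
 * the PT_LOAD offsets, and builds vmcore_list, which maps every file offset
 * in /proc/vmcore to a physical address range in the old kernel's memory.
 * read_vmcore() then serves reads by walking that list.
 */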
/* List representing chunks of contiguous memory areas and their offsets in
 * the vmcore file.
 */
static LIST_HEAD(vmcore_list);

/* Stores the pointer to the buffer containing the kernel ELF core headers. */
static char *elfcorebuf;
static size_t elfcorebuf_sz;

/* Total size of the vmcore file. */
static u64 vmcore_size;

/* Stores the physical address of the crash image's ELF header. */
unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX;

struct proc_dir_entry *proc_vmcore = NULL;

/* Reads up to @count bytes from old memory, starting at the offset *@ppos,
 * a page at a time. Returns the number of bytes read or a negative error.
 */
static ssize_t read_from_oldmem(char *buf, size_t count,
				u64 *ppos, int userbuf)
{
	unsigned long pfn, offset;
	size_t nr_bytes;
	ssize_t read = 0, tmp;

	if (!count)
		return 0;

	offset = (unsigned long)(*ppos % PAGE_SIZE);
	pfn = (unsigned long)(*ppos / PAGE_SIZE);
	if (pfn > saved_max_pfn)
		return -EINVAL;

	do {
		/* Clamp this chunk to the end of the current page. */
		if (count > (PAGE_SIZE - offset))
			nr_bytes = PAGE_SIZE - offset;
		else
			nr_bytes = count;

		tmp = copy_oldmem_page(pfn, buf, nr_bytes, offset, userbuf);
		if (tmp < 0)
			return tmp;
		*ppos += nr_bytes;
		count -= nr_bytes;
		buf += nr_bytes;
		read += nr_bytes;
		++pfn;
		offset = 0;
	} while (count);

	return read;
}
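
/*
 * For illustration, a hypothetical read that straddles a page boundary:
 * with PAGE_SIZE == 4096, *ppos == 4000 and count == 300, the loop above
 * first issues copy_oldmem_page(pfn, buf, 96, 4000, userbuf) for the tail
 * of the first page, then copy_oldmem_page(pfn + 1, buf + 96, 204, 0,
 * userbuf) for the head of the next one, and returns 300.
 */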

/* Maps a vmcore file offset to the corresponding physical address in old
 * memory. */
static u64 map_offset_to_paddr(loff_t offset, struct list_head *vc_list,
					struct vmcore **m_ptr)
{
	struct vmcore *m;
	u64 paddr;

	list_for_each_entry(m, vc_list, list) {
		u64 start, end;
		start = m->offset;
		end = m->offset + m->size - 1;
		if (offset >= start && offset <= end) {
			paddr = m->paddr + offset - start;
			*m_ptr = m;
			return paddr;
		}
	}
	*m_ptr = NULL;
	return 0;
}
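
/*
 * For example (values are hypothetical): if vmcore_list holds a chunk with
 * offset == 0x2000, size == 0x1000 and paddr == 0x100000, a file offset of
 * 0x2800 falls inside it and maps to physical address 0x100800; an offset
 * past every chunk leaves *m_ptr == NULL and returns 0.
 */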

/* Read from the ELF header and then the crash dump. On error, a negative
 * value is returned; otherwise the number of bytes read is returned.
 */
static ssize_t read_vmcore(struct file *file, char __user *buffer,
				size_t buflen, loff_t *fpos)
{
	ssize_t acc = 0, tmp;
	size_t tsz;
	u64 start, nr_bytes;
	struct vmcore *curr_m = NULL;

	if (buflen == 0 || *fpos >= vmcore_size)
		return 0;

	/* Trim buflen so we do not read beyond EOF. */
	if (buflen > vmcore_size - *fpos)
		buflen = vmcore_size - *fpos;

	/* Read the ELF core header. */
	if (*fpos < elfcorebuf_sz) {
		tsz = elfcorebuf_sz - *fpos;
		if (buflen < tsz)
			tsz = buflen;
		if (copy_to_user(buffer, elfcorebuf + *fpos, tsz))
			return -EFAULT;
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;

		/* Return now if the buffer is already full. */
		if (buflen == 0)
			return acc;
	}

	start = map_offset_to_paddr(*fpos, &vmcore_list, &curr_m);
	if (!curr_m)
		return -EINVAL;
	if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
		tsz = buflen;

	/* Calculate the bytes left in the current memory segment. */
	nr_bytes = (curr_m->size - (start - curr_m->paddr));
	if (tsz > nr_bytes)
		tsz = nr_bytes;

	while (buflen) {
		tmp = read_from_oldmem(buffer, tsz, &start, 1);
		if (tmp < 0)
			return tmp;
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;
		/* Move to the next segment once this one is exhausted. */
		if (start >= (curr_m->paddr + curr_m->size)) {
			if (curr_m->list.next == &vmcore_list)
				return acc;	/* EOF */
			curr_m = list_entry(curr_m->list.next,
						struct vmcore, list);
			start = curr_m->paddr;
		}
		if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
			tsz = buflen;
		/* Calculate the bytes left in the current memory segment. */
		nr_bytes = (curr_m->size - (start - curr_m->paddr));
		if (tsz > nr_bytes)
			tsz = nr_bytes;
	}
	return acc;
}
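
/*
 * A sketch of one read(2), under hypothetical numbers: with
 * elfcorebuf_sz == 0x1000 and a request of 0x2000 bytes at *fpos == 0x800,
 * the first 0x800 bytes are copied from elfcorebuf; the remaining 0x1800
 * bytes are fetched in at-most-page-sized chunks via read_from_oldmem(),
 * hopping to the next vmcore_list segment whenever start crosses the end
 * of the current one.
 */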

static int open_vmcore(struct inode *inode, struct file *filp)
{
	return 0;
}

const struct file_operations proc_vmcore_operations = {
	.read		= read_vmcore,
	.open		= open_vmcore,
};

/* Allocates a zeroed vmcore list element. */
static struct vmcore* __init get_new_element(void)
{
	struct vmcore *p;

	p = kmalloc(sizeof(*p), GFP_KERNEL);
	if (p)
		memset(p, 0, sizeof(*p));
	return p;
}

/* Total file size: the ELF header, all program headers, plus every
 * segment's p_memsz.
 */
static u64 __init get_vmcore_size_elf64(char *elfptr)
{
	int i;
	u64 size;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr *phdr_ptr;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;
	phdr_ptr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr));
	size = sizeof(Elf64_Ehdr) + ((ehdr_ptr->e_phnum) * sizeof(Elf64_Phdr));
	for (i = 0; i < ehdr_ptr->e_phnum; i++) {
		size += phdr_ptr->p_memsz;
		phdr_ptr++;
	}
	return size;
}
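
/*
 * Equivalently, for either ELF class, the size computed above is
 *
 *	sizeof(Ehdr) + e_phnum * sizeof(Phdr) + sum of p_memsz
 *
 * over all program headers; it is evaluated after the PT_NOTE merge, so
 * the PT_NOTE term is the total of all merged note data.
 */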

static u64 __init get_vmcore_size_elf32(char *elfptr)
{
	int i;
	u64 size;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr *phdr_ptr;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;
	phdr_ptr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr));
	size = sizeof(Elf32_Ehdr) + ((ehdr_ptr->e_phnum) * sizeof(Elf32_Phdr));
	for (i = 0; i < ehdr_ptr->e_phnum; i++) {
		size += phdr_ptr->p_memsz;
		phdr_ptr++;
	}
	return size;
}

/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz,
						struct list_head *vc_list)
{
	int i, nr_ptnote = 0, rc = 0;
	char *tmp;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr phdr, *phdr_ptr;
	Elf64_Nhdr *nhdr_ptr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;
	phdr_ptr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr));
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		int j;
		void *notes_section;
		struct vmcore *new;
		u64 offset, max_sz, sz, real_sz = 0;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		nr_ptnote++;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = read_from_oldmem(notes_section, max_sz, &offset, 0);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		/* Walk the notes to find the size actually in use; name and
		 * desc are each padded to a multiple of four bytes.
		 */
		for (j = 0; j < max_sz; j += sz) {
			if (nhdr_ptr->n_namesz == 0)
				break;
			sz = sizeof(Elf64_Nhdr) +
				((nhdr_ptr->n_namesz + 3) & ~3) +
				((nhdr_ptr->n_descsz + 3) & ~3);
			real_sz += sz;
			nhdr_ptr = (Elf64_Nhdr *)((char *)nhdr_ptr + sz);
		}

		/* Add this contiguous chunk of notes section to vmcore list. */
		new = get_new_element();
		if (!new) {
			kfree(notes_section);
			return -ENOMEM;
		}
		/* In the crash image's headers, p_offset carries the physical
		 * address of the notes. */
		new->paddr = phdr_ptr->p_offset;
		new->size = real_sz;
		list_add_tail(&new->list, vc_list);
		phdr_sz += real_sz;
		kfree(notes_section);
	}

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type    = PT_NOTE;
	phdr.p_flags   = 0;
	note_off = sizeof(Elf64_Ehdr) +
			(ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf64_Phdr);
	phdr.p_offset  = note_off;
	phdr.p_vaddr   = phdr.p_paddr = 0;
	phdr.p_filesz  = phdr.p_memsz = phdr_sz;
	phdr.p_align   = 0;

	/* Add merged PT_NOTE program header. */
	tmp = elfptr + sizeof(Elf64_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf64_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf64_Ehdr)-sizeof(Elf64_Phdr)));

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	return 0;
}
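
/*
 * A hypothetical walk-through of the merge: with e_phnum == 6, of which
 * nr_ptnote == 3 are PT_NOTE (one per CPU, say), the three note ranges are
 * queued on vc_list, a single PT_NOTE header covering phdr_sz bytes is
 * written into slot one, the two now-dead headers are squeezed out of the
 * buffer, and e_phnum becomes 6 - 3 + 1 == 4. A note with n_namesz == 5
 * and n_descsz == 10 occupies sizeof(Elf64_Nhdr) + 8 + 12 bytes, since
 * both fields round up to a multiple of four.
 */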

/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz,
						struct list_head *vc_list)
{
	int i, nr_ptnote = 0, rc = 0;
	char *tmp;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr phdr, *phdr_ptr;
	Elf32_Nhdr *nhdr_ptr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;
	phdr_ptr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr));
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		int j;
		void *notes_section;
		struct vmcore *new;
		u64 offset, max_sz, sz, real_sz = 0;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		nr_ptnote++;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = read_from_oldmem(notes_section, max_sz, &offset, 0);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		/* Walk the notes to find the size actually in use; name and
		 * desc are each padded to a multiple of four bytes.
		 */
		for (j = 0; j < max_sz; j += sz) {
			if (nhdr_ptr->n_namesz == 0)
				break;
			sz = sizeof(Elf32_Nhdr) +
				((nhdr_ptr->n_namesz + 3) & ~3) +
				((nhdr_ptr->n_descsz + 3) & ~3);
			real_sz += sz;
			nhdr_ptr = (Elf32_Nhdr *)((char *)nhdr_ptr + sz);
		}

		/* Add this contiguous chunk of notes section to vmcore list. */
		new = get_new_element();
		if (!new) {
			kfree(notes_section);
			return -ENOMEM;
		}
		/* In the crash image's headers, p_offset carries the physical
		 * address of the notes. */
		new->paddr = phdr_ptr->p_offset;
		new->size = real_sz;
		list_add_tail(&new->list, vc_list);
		phdr_sz += real_sz;
		kfree(notes_section);
	}

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type    = PT_NOTE;
	phdr.p_flags   = 0;
	note_off = sizeof(Elf32_Ehdr) +
			(ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf32_Phdr);
	phdr.p_offset  = note_off;
	phdr.p_vaddr   = phdr.p_paddr = 0;
	phdr.p_filesz  = phdr.p_memsz = phdr_sz;
	phdr.p_align   = 0;

	/* Add merged PT_NOTE program header. */
	tmp = elfptr + sizeof(Elf32_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf32_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf32_Ehdr)-sizeof(Elf32_Phdr)));

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	return 0;
}

/* Add memory chunks represented by program headers to vmcore list. Also update
 * the new offset fields of exported program headers.
 */
static int __init process_ptload_program_headers_elf64(char *elfptr,
						size_t elfsz,
						struct list_head *vc_list)
{
	int i;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;
	phdr_ptr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr)); /* PT_NOTE hdr */

	/* The first program header is the merged PT_NOTE header, so the
	 * PT_LOAD data starts right after the note data. */
	vmcore_off = sizeof(Elf64_Ehdr) +
			(ehdr_ptr->e_phnum) * sizeof(Elf64_Phdr) +
			phdr_ptr->p_memsz; /* Note sections */

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		/* Add this contiguous chunk of memory to vmcore list. */
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		/* Here too, p_offset in the crash headers holds the chunk's
		 * physical address. */
		new->paddr = phdr_ptr->p_offset;
		new->size = phdr_ptr->p_memsz;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset. */
		phdr_ptr->p_offset = vmcore_off;
		vmcore_off = vmcore_off + phdr_ptr->p_memsz;
	}
	return 0;
}
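
/*
 * The resulting /proc/vmcore layout, as the functions above arrange it:
 *
 *	[ Ehdr | merged PT_NOTE Phdr | PT_LOAD Phdrs | note data | load data ]
 *
 * with each PT_LOAD's rewritten p_offset pointing at its slice of the load
 * data region.
 */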

static int __init process_ptload_program_headers_elf32(char *elfptr,
						size_t elfsz,
						struct list_head *vc_list)
{
	int i;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;
	phdr_ptr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr)); /* PT_NOTE hdr */

	/* The first program header is the merged PT_NOTE header, so the
	 * PT_LOAD data starts right after the note data. */
	vmcore_off = sizeof(Elf32_Ehdr) +
			(ehdr_ptr->e_phnum) * sizeof(Elf32_Phdr) +
			phdr_ptr->p_memsz; /* Note sections */

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		/* Add this contiguous chunk of memory to vmcore list. */
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		/* Here too, p_offset in the crash headers holds the chunk's
		 * physical address. */
		new->paddr = phdr_ptr->p_offset;
		new->size = phdr_ptr->p_memsz;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset. */
		phdr_ptr->p_offset = vmcore_off;
		vmcore_off = vmcore_off + phdr_ptr->p_memsz;
	}
	return 0;
}

/* Sets the offset fields of vmcore elements. */
static void __init set_vmcore_list_offsets_elf64(char *elfptr,
						struct list_head *vc_list)
{
	loff_t vmcore_off;
	Elf64_Ehdr *ehdr_ptr;
	struct vmcore *m;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;

	/* Skip the ELF header and program headers. */
	vmcore_off = sizeof(Elf64_Ehdr) +
			(ehdr_ptr->e_phnum) * sizeof(Elf64_Phdr);

	list_for_each_entry(m, vc_list, list) {
		m->offset = vmcore_off;
		vmcore_off += m->size;
	}
}

/* Sets the offset fields of vmcore elements. */
static void __init set_vmcore_list_offsets_elf32(char *elfptr,
						struct list_head *vc_list)
{
	loff_t vmcore_off;
	Elf32_Ehdr *ehdr_ptr;
	struct vmcore *m;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;

	/* Skip the ELF header and program headers. */
	vmcore_off = sizeof(Elf32_Ehdr) +
			(ehdr_ptr->e_phnum) * sizeof(Elf32_Phdr);

	list_for_each_entry(m, vc_list, list) {
		m->offset = vmcore_off;
		vmcore_off += m->size;
	}
}
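
/*
 * Because the note chunks were queued on vmcore_list before the PT_LOAD
 * chunks, the offsets assigned here line up with the merged PT_NOTE's
 * note_off and with the rewritten PT_LOAD p_offset values computed above.
 */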

static int __init parse_crash_elf64_headers(void)
{
	int rc = 0;
	Elf64_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read the ELF header. */
	rc = read_from_oldmem((char *)&ehdr, sizeof(Elf64_Ehdr), &addr, 0);
	if (rc < 0)
		return rc;

	/* Do some basic verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
		(ehdr.e_type != ET_CORE) ||
		!elf_check_arch(&ehdr) ||
		ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
		ehdr.e_version != EV_CURRENT ||
		ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
		ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
		ehdr.e_phnum == 0) {
		printk(KERN_WARNING "Warning: Core image elf header is not"
					" sane\n");
		return -EINVAL;
	}

	/* Read in all ELF headers. */
	elfcorebuf_sz = sizeof(Elf64_Ehdr) + ehdr.e_phnum * sizeof(Elf64_Phdr);
	elfcorebuf = kmalloc(elfcorebuf_sz, GFP_KERNEL);
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = read_from_oldmem(elfcorebuf, elfcorebuf_sz, &addr, 0);
	if (rc < 0) {
		kfree(elfcorebuf);
		return rc;
	}

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz, &vmcore_list);
	if (rc) {
		kfree(elfcorebuf);
		return rc;
	}
	rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
							&vmcore_list);
	if (rc) {
		kfree(elfcorebuf);
		return rc;
	}
	set_vmcore_list_offsets_elf64(elfcorebuf, &vmcore_list);
	return 0;
}

static int __init parse_crash_elf32_headers(void)
{
	int rc = 0;
	Elf32_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read the ELF header. */
	rc = read_from_oldmem((char *)&ehdr, sizeof(Elf32_Ehdr), &addr, 0);
	if (rc < 0)
		return rc;

	/* Do some basic verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
		(ehdr.e_type != ET_CORE) ||
		!elf_check_arch(&ehdr) ||
		ehdr.e_ident[EI_CLASS] != ELFCLASS32 ||
		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
		ehdr.e_version != EV_CURRENT ||
		ehdr.e_ehsize != sizeof(Elf32_Ehdr) ||
		ehdr.e_phentsize != sizeof(Elf32_Phdr) ||
		ehdr.e_phnum == 0) {
		printk(KERN_WARNING "Warning: Core image elf header is not"
					" sane\n");
		return -EINVAL;
	}

	/* Read in all ELF headers. */
	elfcorebuf_sz = sizeof(Elf32_Ehdr) + ehdr.e_phnum * sizeof(Elf32_Phdr);
	elfcorebuf = kmalloc(elfcorebuf_sz, GFP_KERNEL);
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = read_from_oldmem(elfcorebuf, elfcorebuf_sz, &addr, 0);
	if (rc < 0) {
		kfree(elfcorebuf);
		return rc;
	}

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz, &vmcore_list);
	if (rc) {
		kfree(elfcorebuf);
		return rc;
	}
	rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz,
								&vmcore_list);
	if (rc) {
		kfree(elfcorebuf);
		return rc;
	}
	set_vmcore_list_offsets_elf32(elfcorebuf, &vmcore_list);
	return 0;
}

static int __init parse_crash_elf_headers(void)
{
	unsigned char e_ident[EI_NIDENT];
	u64 addr;
	int rc = 0;

	addr = elfcorehdr_addr;
	rc = read_from_oldmem(e_ident, EI_NIDENT, &addr, 0);
	if (rc < 0)
		return rc;
	if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) {
		printk(KERN_WARNING "Warning: Core image elf header"
					" not found\n");
		return -EINVAL;
	}

	if (e_ident[EI_CLASS] == ELFCLASS64) {
		rc = parse_crash_elf64_headers();
		if (rc)
			return rc;

		/* Determine vmcore size. */
		vmcore_size = get_vmcore_size_elf64(elfcorebuf);
	} else if (e_ident[EI_CLASS] == ELFCLASS32) {
		rc = parse_crash_elf32_headers();
		if (rc)
			return rc;

		/* Determine vmcore size. */
		vmcore_size = get_vmcore_size_elf32(elfcorebuf);
	} else {
		printk(KERN_WARNING "Warning: Core image elf header is not"
					" sane\n");
		return -EINVAL;
	}
	return 0;
}

/* Init function for the vmcore module. */
static int __init vmcore_init(void)
{
	int rc = 0;

	/* Only proceed if elfcorehdr= was passed on the command line, i.e.
	 * this kernel is the crash (capture) kernel. */
	if (!(elfcorehdr_addr < ELFCORE_ADDR_MAX))
		return rc;
	rc = parse_crash_elf_headers();
	if (rc) {
		printk(KERN_WARNING "Kdump: vmcore not initialized\n");
		return rc;
	}

	/* Initialize /proc/vmcore size if proc is already up. */
	if (proc_vmcore)
		proc_vmcore->size = vmcore_size;
	return 0;
}
module_init(vmcore_init)
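
/*
 * Typical consumption of this interface (an example, outside this file):
 * userspace in the capture kernel saves the dump with something like
 * "cp /proc/vmcore /var/crash/vmcore" and inspects it with ELF-aware
 * tools, since the file presents old memory as an ELF core image.
 */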