/*
 * fs/proc/vmcore.c - Interface for accessing the crash
 * dump from the system's previous life.
 * Heavily borrowed from fs/proc/kcore.c
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 * Copyright (C) IBM Corporation, 2004. All rights reserved
 *
 */

#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/crash_dump.h>
#include <linux/list.h>
#include <asm/uaccess.h>
#include <asm/io.h>

/* List representing chunks of contiguous memory areas and their offsets in
 * the vmcore file.
 */
static LIST_HEAD(vmcore_list);

/* Stores the pointer to the buffer containing the kernel ELF core headers. */
static char *elfcorebuf;
static size_t elfcorebuf_sz;

/* Total size of the vmcore file. */
static u64 vmcore_size;

static struct proc_dir_entry *proc_vmcore = NULL;

/* Reads a page from the oldmem device from the given offset. */
static ssize_t read_from_oldmem(char *buf, size_t count,
				u64 *ppos, int userbuf)
{
	unsigned long pfn, offset;
	size_t nr_bytes;
	ssize_t read = 0, tmp;

	if (!count)
		return 0;

	offset = (unsigned long)(*ppos % PAGE_SIZE);
	pfn = (unsigned long)(*ppos / PAGE_SIZE);

	do {
		/* Copy at most up to the end of the current page. */
		if (count > (PAGE_SIZE - offset))
			nr_bytes = PAGE_SIZE - offset;
		else
			nr_bytes = count;

		tmp = copy_oldmem_page(pfn, buf, nr_bytes, offset, userbuf);
		if (tmp < 0)
			return tmp;
		*ppos += nr_bytes;
		count -= nr_bytes;
		buf += nr_bytes;
		read += nr_bytes;
		++pfn;
		offset = 0;
	} while (count);

	return read;
}

/* Maps a vmcore file offset to the respective physical address in memory. */
static u64 map_offset_to_paddr(loff_t offset, struct list_head *vc_list,
			       struct vmcore **m_ptr)
{
	struct vmcore *m;
	u64 paddr;

	list_for_each_entry(m, vc_list, list) {
		u64 start, end;
		start = m->offset;
		end = m->offset + m->size - 1;
		if (offset >= start && offset <= end) {
			paddr = m->paddr + offset - start;
			*m_ptr = m;
			return paddr;
		}
	}
	*m_ptr = NULL;
	return 0;
}
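/*
 * Read path, in brief: offsets below elfcorebuf_sz are served from the
 * in-memory copy of the (merged) ELF headers; anything beyond that is
 * translated by map_offset_to_paddr() into a physical address of the
 * old kernel and copied page by page via read_from_oldmem().
 */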
/* Read from the ELF header and then the crash dump. On error, a negative
 * value is returned; otherwise the number of bytes read is returned.
 */
static ssize_t read_vmcore(struct file *file, char __user *buffer,
			   size_t buflen, loff_t *fpos)
{
	ssize_t acc = 0, tmp;
	size_t tsz;
	u64 start, nr_bytes;
	struct vmcore *curr_m = NULL;

	if (buflen == 0 || *fpos >= vmcore_size)
		return 0;

	/* trim buflen to not go beyond EOF */
	if (buflen > vmcore_size - *fpos)
		buflen = vmcore_size - *fpos;

	/* Read ELF core header */
	if (*fpos < elfcorebuf_sz) {
		tsz = elfcorebuf_sz - *fpos;
		if (buflen < tsz)
			tsz = buflen;
		if (copy_to_user(buffer, elfcorebuf + *fpos, tsz))
			return -EFAULT;
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (buflen == 0)
			return acc;
	}

	start = map_offset_to_paddr(*fpos, &vmcore_list, &curr_m);
	if (!curr_m)
		return -EINVAL;
	if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
		tsz = buflen;

	/* Calculate how many bytes are left in the current memory segment. */
	nr_bytes = (curr_m->size - (start - curr_m->paddr));
	if (tsz > nr_bytes)
		tsz = nr_bytes;

	while (buflen) {
		tmp = read_from_oldmem(buffer, tsz, &start, 1);
		if (tmp < 0)
			return tmp;
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;
		/* Move on to the next segment once this one is exhausted. */
		if (start >= (curr_m->paddr + curr_m->size)) {
			if (curr_m->list.next == &vmcore_list)
				return acc;	/* EOF */
			curr_m = list_entry(curr_m->list.next,
					    struct vmcore, list);
			start = curr_m->paddr;
		}
		if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
			tsz = buflen;
		/* Calculate how many bytes are left in the current memory segment. */
		nr_bytes = (curr_m->size - (start - curr_m->paddr));
		if (tsz > nr_bytes)
			tsz = nr_bytes;
	}
	return acc;
}

static const struct file_operations proc_vmcore_operations = {
	.read	= read_vmcore,
	.llseek	= generic_file_llseek,
};

static struct vmcore *__init get_new_element(void)
{
	return kzalloc(sizeof(struct vmcore), GFP_KERNEL);
}

static u64 __init get_vmcore_size_elf64(char *elfptr)
{
	int i;
	u64 size;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr *phdr_ptr;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;
	phdr_ptr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr));
	/* Size of the headers plus the data of every program segment. */
	size = sizeof(Elf64_Ehdr) + ((ehdr_ptr->e_phnum) * sizeof(Elf64_Phdr));
	for (i = 0; i < ehdr_ptr->e_phnum; i++) {
		size += phdr_ptr->p_memsz;
		phdr_ptr++;
	}
	return size;
}

static u64 __init get_vmcore_size_elf32(char *elfptr)
{
	int i;
	u64 size;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr *phdr_ptr;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;
	phdr_ptr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr));
	/* Size of the headers plus the data of every program segment. */
	size = sizeof(Elf32_Ehdr) + ((ehdr_ptr->e_phnum) * sizeof(Elf32_Phdr));
	for (i = 0; i < ehdr_ptr->e_phnum; i++) {
		size += phdr_ptr->p_memsz;
		phdr_ptr++;
	}
	return size;
}
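/*
 * A PT_NOTE segment is a sequence of note entries; each entry is an
 * Elf{32,64}_Nhdr followed by the name and the descriptor, each padded
 * to a 4-byte boundary:
 *
 *	[ Nhdr ][ name (padded) ][ desc (padded) ][ next Nhdr ] ...
 *
 * which is why the real size of a notes chunk below is accumulated
 * using ((n_namesz + 3) & ~3) and ((n_descsz + 3) & ~3).
 */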
/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz,
					   struct list_head *vc_list)
{
	int i, nr_ptnote = 0, rc = 0;
	char *tmp;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr phdr, *phdr_ptr;
	Elf64_Nhdr *nhdr_ptr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;
	phdr_ptr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr));
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		int j;
		void *notes_section;
		struct vmcore *new;
		u64 offset, max_sz, sz, real_sz = 0;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		nr_ptnote++;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = read_from_oldmem(notes_section, max_sz, &offset, 0);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		/* Walk the note entries to find the real size of this chunk. */
		nhdr_ptr = notes_section;
		for (j = 0; j < max_sz; j += sz) {
			if (nhdr_ptr->n_namesz == 0)
				break;
			sz = sizeof(Elf64_Nhdr) +
				((nhdr_ptr->n_namesz + 3) & ~3) +
				((nhdr_ptr->n_descsz + 3) & ~3);
			real_sz += sz;
			nhdr_ptr = (Elf64_Nhdr *)((char *)nhdr_ptr + sz);
		}

		/* Add this contiguous chunk of notes section to vmcore list. */
		new = get_new_element();
		if (!new) {
			kfree(notes_section);
			return -ENOMEM;
		}
		/* In the crash headers, p_offset holds the physical address. */
		new->paddr = phdr_ptr->p_offset;
		new->size = real_sz;
		list_add_tail(&new->list, vc_list);
		phdr_sz += real_sz;
		kfree(notes_section);
	}

	/* Prepare the merged PT_NOTE program header. */
	phdr.p_type = PT_NOTE;
	phdr.p_flags = 0;
	note_off = sizeof(Elf64_Ehdr) +
		(ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf64_Phdr);
	phdr.p_offset = note_off;
	phdr.p_vaddr = phdr.p_paddr = 0;
	phdr.p_filesz = phdr.p_memsz = phdr_sz;
	phdr.p_align = 0;

	/* Add the merged PT_NOTE program header. */
	tmp = elfptr + sizeof(Elf64_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf64_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp + i, ((*elfsz) - sizeof(Elf64_Ehdr) - sizeof(Elf64_Phdr)));

	/* Modify e_phnum to reflect the merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	return 0;
}

/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz,
					   struct list_head *vc_list)
{
	int i, nr_ptnote = 0, rc = 0;
	char *tmp;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr phdr, *phdr_ptr;
	Elf32_Nhdr *nhdr_ptr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;
	phdr_ptr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr));
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		int j;
		void *notes_section;
		struct vmcore *new;
		u64 offset, max_sz, sz, real_sz = 0;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		nr_ptnote++;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = read_from_oldmem(notes_section, max_sz, &offset, 0);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		/* Walk the note entries to find the real size of this chunk. */
		nhdr_ptr = notes_section;
		for (j = 0; j < max_sz; j += sz) {
			if (nhdr_ptr->n_namesz == 0)
				break;
			sz = sizeof(Elf32_Nhdr) +
				((nhdr_ptr->n_namesz + 3) & ~3) +
				((nhdr_ptr->n_descsz + 3) & ~3);
			real_sz += sz;
			nhdr_ptr = (Elf32_Nhdr *)((char *)nhdr_ptr + sz);
		}

		/* Add this contiguous chunk of notes section to vmcore list. */
		new = get_new_element();
		if (!new) {
			kfree(notes_section);
			return -ENOMEM;
		}
		/* In the crash headers, p_offset holds the physical address. */
		new->paddr = phdr_ptr->p_offset;
		new->size = real_sz;
		list_add_tail(&new->list, vc_list);
		phdr_sz += real_sz;
		kfree(notes_section);
	}

	/* Prepare the merged PT_NOTE program header. */
	phdr.p_type = PT_NOTE;
	phdr.p_flags = 0;
	note_off = sizeof(Elf32_Ehdr) +
		(ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf32_Phdr);
	phdr.p_offset = note_off;
	phdr.p_vaddr = phdr.p_paddr = 0;
	phdr.p_filesz = phdr.p_memsz = phdr_sz;
	phdr.p_align = 0;

	/* Add the merged PT_NOTE program header. */
	tmp = elfptr + sizeof(Elf32_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf32_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp + i, ((*elfsz) - sizeof(Elf32_Ehdr) - sizeof(Elf32_Phdr)));

	/* Modify e_phnum to reflect the merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	return 0;
}
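/*
 * After merging, the exported file looks roughly like:
 *
 *	[ Ehdr ][ merged PT_NOTE phdr ][ PT_LOAD phdrs ... ]
 *	[ note data ... ][ PT_LOAD data, in program header order ... ]
 *
 * The helpers below rewrite every PT_LOAD p_offset to point into that
 * layout and record the matching physical ranges in vmcore_list.
 */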
/* Add memory chunks represented by program headers to the vmcore list. Also
 * update the new offset fields of the exported program headers.
 */
static int __init process_ptload_program_headers_elf64(char *elfptr,
						       size_t elfsz,
						       struct list_head *vc_list)
{
	int i;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;
	phdr_ptr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr)); /* PT_NOTE hdr */

	/* First program header is the PT_NOTE header. */
	vmcore_off = sizeof(Elf64_Ehdr) +
			(ehdr_ptr->e_phnum) * sizeof(Elf64_Phdr) +
			phdr_ptr->p_memsz; /* Note sections */

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		/* Add this contiguous chunk of memory to the vmcore list. */
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = phdr_ptr->p_offset;
		new->size = phdr_ptr->p_memsz;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset. */
		phdr_ptr->p_offset = vmcore_off;
		vmcore_off = vmcore_off + phdr_ptr->p_memsz;
	}
	return 0;
}

static int __init process_ptload_program_headers_elf32(char *elfptr,
						       size_t elfsz,
						       struct list_head *vc_list)
{
	int i;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;
	phdr_ptr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr)); /* PT_NOTE hdr */

	/* First program header is the PT_NOTE header. */
	vmcore_off = sizeof(Elf32_Ehdr) +
			(ehdr_ptr->e_phnum) * sizeof(Elf32_Phdr) +
			phdr_ptr->p_memsz; /* Note sections */

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		/* Add this contiguous chunk of memory to the vmcore list. */
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = phdr_ptr->p_offset;
		new->size = phdr_ptr->p_memsz;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset. */
		phdr_ptr->p_offset = vmcore_off;
		vmcore_off = vmcore_off + phdr_ptr->p_memsz;
	}
	return 0;
}

/* Sets the offset fields of the vmcore elements. */
static void __init set_vmcore_list_offsets_elf64(char *elfptr,
						 struct list_head *vc_list)
{
	loff_t vmcore_off;
	Elf64_Ehdr *ehdr_ptr;
	struct vmcore *m;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;

	/* Skip the ELF header and program headers. */
	vmcore_off = sizeof(Elf64_Ehdr) +
			(ehdr_ptr->e_phnum) * sizeof(Elf64_Phdr);

	list_for_each_entry(m, vc_list, list) {
		m->offset = vmcore_off;
		vmcore_off += m->size;
	}
}

/* Sets the offset fields of the vmcore elements. */
static void __init set_vmcore_list_offsets_elf32(char *elfptr,
						 struct list_head *vc_list)
{
	loff_t vmcore_off;
	Elf32_Ehdr *ehdr_ptr;
	struct vmcore *m;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;

	/* Skip the ELF header and program headers. */
	vmcore_off = sizeof(Elf32_Ehdr) +
			(ehdr_ptr->e_phnum) * sizeof(Elf32_Phdr);

	list_for_each_entry(m, vc_list, list) {
		m->offset = vmcore_off;
		vmcore_off += m->size;
	}
}
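/*
 * Parsing happens in four steps: read the ELF header the crashed kernel
 * left at elfcorehdr_addr, sanity-check it, copy the complete header
 * table into elfcorebuf, then rewrite it (merge notes, relocate PT_LOAD
 * offsets) and assign file offsets to every chunk in vmcore_list.
 */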
static int __init parse_crash_elf64_headers(void)
{
	int rc = 0;
	Elf64_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read the ELF header. */
	rc = read_from_oldmem((char *)&ehdr, sizeof(Elf64_Ehdr), &addr, 0);
	if (rc < 0)
		return rc;

	/* Do some basic verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
		(ehdr.e_type != ET_CORE) ||
		!vmcore_elf_check_arch(&ehdr) ||
		ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
		ehdr.e_version != EV_CURRENT ||
		ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
		ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
		ehdr.e_phnum == 0) {
		printk(KERN_WARNING "Warning: Core image elf header is not"
					" sane\n");
		return -EINVAL;
	}

	/* Read in all the ELF headers. */
	elfcorebuf_sz = sizeof(Elf64_Ehdr) + ehdr.e_phnum * sizeof(Elf64_Phdr);
	elfcorebuf = kmalloc(elfcorebuf_sz, GFP_KERNEL);
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = read_from_oldmem(elfcorebuf, elfcorebuf_sz, &addr, 0);
	if (rc < 0) {
		kfree(elfcorebuf);
		return rc;
	}

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz, &vmcore_list);
	if (rc) {
		kfree(elfcorebuf);
		return rc;
	}
	rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
						  &vmcore_list);
	if (rc) {
		kfree(elfcorebuf);
		return rc;
	}
	set_vmcore_list_offsets_elf64(elfcorebuf, &vmcore_list);
	return 0;
}

static int __init parse_crash_elf32_headers(void)
{
	int rc = 0;
	Elf32_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read the ELF header. */
	rc = read_from_oldmem((char *)&ehdr, sizeof(Elf32_Ehdr), &addr, 0);
	if (rc < 0)
		return rc;

	/* Do some basic verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
		(ehdr.e_type != ET_CORE) ||
		!elf_check_arch(&ehdr) ||
		ehdr.e_ident[EI_CLASS] != ELFCLASS32 ||
		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
		ehdr.e_version != EV_CURRENT ||
		ehdr.e_ehsize != sizeof(Elf32_Ehdr) ||
		ehdr.e_phentsize != sizeof(Elf32_Phdr) ||
		ehdr.e_phnum == 0) {
		printk(KERN_WARNING "Warning: Core image elf header is not"
					" sane\n");
		return -EINVAL;
	}

	/* Read in all the ELF headers. */
	elfcorebuf_sz = sizeof(Elf32_Ehdr) + ehdr.e_phnum * sizeof(Elf32_Phdr);
	elfcorebuf = kmalloc(elfcorebuf_sz, GFP_KERNEL);
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = read_from_oldmem(elfcorebuf, elfcorebuf_sz, &addr, 0);
	if (rc < 0) {
		kfree(elfcorebuf);
		return rc;
	}

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz, &vmcore_list);
	if (rc) {
		kfree(elfcorebuf);
		return rc;
	}
	rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz,
						  &vmcore_list);
	if (rc) {
		kfree(elfcorebuf);
		return rc;
	}
	set_vmcore_list_offsets_elf32(elfcorebuf, &vmcore_list);
	return 0;
}
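/*
 * Peek at e_ident of the old kernel's ELF header to decide whether this
 * is a 64-bit or a 32-bit core, and dispatch to the matching parser.
 */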
static int __init parse_crash_elf_headers(void)
{
	unsigned char e_ident[EI_NIDENT];
	u64 addr;
	int rc = 0;

	addr = elfcorehdr_addr;
	rc = read_from_oldmem(e_ident, EI_NIDENT, &addr, 0);
	if (rc < 0)
		return rc;
	if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) {
		printk(KERN_WARNING "Warning: Core image elf header"
					" not found\n");
		return -EINVAL;
	}

	if (e_ident[EI_CLASS] == ELFCLASS64) {
		rc = parse_crash_elf64_headers();
		if (rc)
			return rc;

		/* Determine the vmcore size. */
		vmcore_size = get_vmcore_size_elf64(elfcorebuf);
	} else if (e_ident[EI_CLASS] == ELFCLASS32) {
		rc = parse_crash_elf32_headers();
		if (rc)
			return rc;

		/* Determine the vmcore size. */
		vmcore_size = get_vmcore_size_elf32(elfcorebuf);
	} else {
		printk(KERN_WARNING "Warning: Core image elf header is not"
					" sane\n");
		return -EINVAL;
	}
	return 0;
}

/* Init function for the vmcore module. */
static int __init vmcore_init(void)
{
	int rc = 0;

	/* If elfcorehdr= has been passed on the cmdline, capture the dump. */
	if (!(is_vmcore_usable()))
		return rc;
	rc = parse_crash_elf_headers();
	if (rc) {
		printk(KERN_WARNING "Kdump: vmcore not initialized\n");
		return rc;
	}

	proc_vmcore = proc_create("vmcore", S_IRUSR, NULL,
				  &proc_vmcore_operations);
	if (proc_vmcore)
		proc_vmcore->size = vmcore_size;
	return 0;
}
module_init(vmcore_init)
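/*
 * Once initialized, the previous kernel's memory is exposed as an ELF
 * core file; a capture environment can, for example, simply do:
 *
 *	cp /proc/vmcore /var/crash/vmcore
 *
 * and later inspect the result with gdb or the crash utility
 * (illustrative paths; the details depend on the distribution's
 * kdump setup).
 */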