// SPDX-License-Identifier: GPL-2.0
/*
 *	fs/proc/kcore.c kernel ELF core dumper
 *
 *	Modelled on fs/exec.c:aout_core_dump()
 *	Jeremy Fitzhardinge <jeremy@sw.oz.au>
 *	ELF version written by David Howells <David.Howells@nexor.co.uk>
 *	Modified and incorporated into 2.3.x by Tigran Aivazian <tigran@veritas.com>
 *	Support to dump vmalloc'd areas (ELF only), Tigran Aivazian <tigran@veritas.com>
 *	Safe accesses to vmalloc/direct-mapped discontiguous areas, Kanoj Sarcar <kanoj@sgi.com>
 */
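
/*
 * The file exposes the kernel's memory as an ELF core image. A typical way
 * to inspect it from userspace is to point a debugger at it together with
 * the matching vmlinux, e.g.:
 *
 *	gdb vmlinux /proc/kcore
 */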

#include <linux/vmcore_info.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/kcore.h>
#include <linux/user.h>
#include <linux/capability.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/printk.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <asm/io.h>
#include <linux/list.h>
#include <linux/ioport.h>
#include <linux/memory.h>
#include <linux/sched/task.h>
#include <linux/security.h>
#include <asm/sections.h>
#include "internal.h"

#define CORE_STR "CORE"

#ifndef ELF_CORE_EFLAGS
#define ELF_CORE_EFLAGS	0
#endif

static struct proc_dir_entry *proc_root_kcore;


#ifndef kc_vaddr_to_offset
#define kc_vaddr_to_offset(v) ((v) - PAGE_OFFSET)
#endif
#ifndef kc_offset_to_vaddr
#define kc_offset_to_vaddr(o) ((o) + PAGE_OFFSET)
#endif

#ifndef kc_xlate_dev_mem_ptr
#define kc_xlate_dev_mem_ptr kc_xlate_dev_mem_ptr
static inline void *kc_xlate_dev_mem_ptr(phys_addr_t phys)
{
	return __va(phys);
}
#endif
#ifndef kc_unxlate_dev_mem_ptr
#define kc_unxlate_dev_mem_ptr kc_unxlate_dev_mem_ptr
static inline void kc_unxlate_dev_mem_ptr(phys_addr_t phys, void *virt)
{
}
#endif

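/*
 * Regions exported through /proc/kcore, and the ELF layout values derived
 * from them; all of this is protected by kclist_lock.
 */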
static LIST_HEAD(kclist_head);
static int kcore_nphdr;
static size_t kcore_phdrs_len;
static size_t kcore_notes_len;
static size_t kcore_data_offset;
DEFINE_STATIC_PERCPU_RWSEM(kclist_lock);
static int kcore_need_update = 1;

/*
 * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error
 * Same as oldmem_pfn_is_ram in vmcore
 */
static int (*mem_pfn_is_ram)(unsigned long pfn);

int __init register_mem_pfn_is_ram(int (*fn)(unsigned long pfn))
{
	if (mem_pfn_is_ram)
		return -EBUSY;
	mem_pfn_is_ram = fn;
	return 0;
}
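
/*
 * Illustrative registration (a sketch; my_pfn_is_ram and
 * my_pfn_is_ballooned are hypothetical names) from code that wants certain
 * pfns reported as non-RAM, e.g. a balloon-style driver:
 *
 *	static int my_pfn_is_ram(unsigned long pfn)
 *	{
 *		return !my_pfn_is_ballooned(pfn);
 *	}
 *	...
 *	register_mem_pfn_is_ram(&my_pfn_is_ram);
 */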

static int pfn_is_ram(unsigned long pfn)
{
	if (mem_pfn_is_ram)
		return mem_pfn_is_ram(pfn);
	else
		return 1;
}

/* This doesn't grab kclist_lock, so it should only be used at init time. */
void __init kclist_add(struct kcore_list *new, void *addr, size_t size,
		       int type)
{
	new->addr = (unsigned long)addr;
	new->size = size;
	new->type = type;

	list_add_tail(&new->list, &kclist_head);
}

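/*
 * Recompute the ELF layout after kclist_head has changed: the program
 * header count (one PT_NOTE plus one PT_LOAD per list entry), the sizes of
 * the program header table and note segment, the page-aligned offset at
 * which memory contents start, and the resulting /proc/kcore file size.
 */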
static void update_kcore_size(void)
{
	size_t try, size;
	struct kcore_list *m;

	kcore_nphdr = 1; /* PT_NOTE */
	size = 0;

	list_for_each_entry(m, &kclist_head, list) {
		try = kc_vaddr_to_offset((size_t)m->addr + m->size);
		if (try > size)
			size = try;
		kcore_nphdr++;
	}

	kcore_phdrs_len = kcore_nphdr * sizeof(struct elf_phdr);
	kcore_notes_len = (4 * sizeof(struct elf_note) +
			   3 * ALIGN(sizeof(CORE_STR), 4) +
			   VMCOREINFO_NOTE_NAME_BYTES +
			   ALIGN(sizeof(struct elf_prstatus), 4) +
			   ALIGN(sizeof(struct elf_prpsinfo), 4) +
			   ALIGN(arch_task_struct_size, 4) +
			   ALIGN(vmcoreinfo_size, 4));
	kcore_data_offset = PAGE_ALIGN(sizeof(struct elfhdr) + kcore_phdrs_len +
				       kcore_notes_len);
	proc_root_kcore->size = kcore_data_offset + size;
}

#ifdef CONFIG_HIGHMEM
/*
 * With highmem, only the low memory range [0...max_low_pfn) is directly
 * mapped, and we can treat it as one contiguous region because the memory
 * holes there are not as big as in the !HIGHMEM case. (HIGHMEM is special
 * because part of memory is _invisible_ to the kernel.)
 */
static int kcore_ram_list(struct list_head *head)
{
	struct kcore_list *ent;

	ent = kmalloc(sizeof(*ent), GFP_KERNEL);
	if (!ent)
		return -ENOMEM;
	ent->addr = (unsigned long)__va(0);
	ent->size = max_low_pfn << PAGE_SHIFT;
	ent->type = KCORE_RAM;
	list_add(&ent->list, head);
	return 0;
}

#else /* !CONFIG_HIGHMEM */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/* calculate the vmemmap address range for the given System RAM pfns and register it */
static int
get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head)
{
	unsigned long pfn = __pa(ent->addr) >> PAGE_SHIFT;
	unsigned long nr_pages = ent->size >> PAGE_SHIFT;
	unsigned long start, end;
	struct kcore_list *vmm, *tmp;

	start = ((unsigned long)pfn_to_page(pfn)) & PAGE_MASK;
	end = ((unsigned long)pfn_to_page(pfn + nr_pages)) - 1;
	end = PAGE_ALIGN(end);
	/* overlap check (needed because we have to page-align the ranges) */
	list_for_each_entry(tmp, head, list) {
		if (tmp->type != KCORE_VMEMMAP)
			continue;
		if (start < tmp->addr + tmp->size)
			if (end > tmp->addr)
				end = tmp->addr;
	}
	if (start < end) {
		vmm = kmalloc(sizeof(*vmm), GFP_KERNEL);
		if (!vmm)
			return 0;
		vmm->addr = start;
		vmm->size = end - start;
		vmm->type = KCORE_VMEMMAP;
		list_add_tail(&vmm->list, head);
	}
	return 1;
}
#else
static int
get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head)
{
	return 1;
}

#endif

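/*
 * walk_system_ram_range() callback: register one KCORE_RAM entry covering
 * the given pfn range (trimmed so it does not run past the direct map) plus
 * the matching KCORE_VMEMMAP entry. Returns 0 on success; any nonzero
 * return value stops the walk.
 */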
static int
kclist_add_private(unsigned long pfn, unsigned long nr_pages, void *arg)
{
	struct list_head *head = (struct list_head *)arg;
	struct kcore_list *ent;
	struct page *p;

	if (!pfn_valid(pfn))
		return 1;

	p = pfn_to_page(pfn);

	ent = kmalloc(sizeof(*ent), GFP_KERNEL);
	if (!ent)
		return -ENOMEM;
	ent->addr = (unsigned long)page_to_virt(p);
	ent->size = nr_pages << PAGE_SHIFT;

	if (!virt_addr_valid((void *)ent->addr))
		goto free_out;

	/* cut the not-mapped area (taken from the ppc32 code) */
	if (ULONG_MAX - ent->addr < ent->size)
		ent->size = ULONG_MAX - ent->addr;

	/*
	 * We've already checked virt_addr_valid, so we know this address
	 * is a valid pointer; therefore we can check against it to determine
	 * if we need to trim.
	 */
	if (VMALLOC_START > ent->addr) {
		if (VMALLOC_START - ent->addr < ent->size)
			ent->size = VMALLOC_START - ent->addr;
	}

	ent->type = KCORE_RAM;
	list_add_tail(&ent->list, head);

	if (!get_sparsemem_vmemmap_info(ent, head)) {
		list_del(&ent->list);
		goto free_out;
	}

	return 0;
free_out:
	kfree(ent);
	return 1;
}

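/* Build one KCORE_RAM entry per System RAM range across all memory nodes. */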
static int kcore_ram_list(struct list_head *list)
{
	int nid, ret;
	unsigned long end_pfn;

	/* Find the largest end pfn across all nodes with memory. */
	end_pfn = 0;
	for_each_node_state(nid, N_MEMORY) {
		unsigned long node_end;

		node_end = node_end_pfn(nid);
		if (end_pfn < node_end)
			end_pfn = node_end;
	}
	/* scan 0 to max_pfn */
	ret = walk_system_ram_range(0, end_pfn, list, kclist_add_private);
	if (ret)
		return -ENOMEM;
	return 0;
}
#endif /* CONFIG_HIGHMEM */

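/*
 * Rebuild the KCORE_RAM/KCORE_VMEMMAP entries in kclist_head from the
 * current System RAM layout and refresh the cached ELF layout. Replaced
 * entries are moved to a garbage list and freed after dropping the lock.
 */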
static int kcore_update_ram(void)
{
	LIST_HEAD(list);
	LIST_HEAD(garbage);
	struct kcore_list *tmp, *pos;
	int ret = 0;

	percpu_down_write(&kclist_lock);
	if (!xchg(&kcore_need_update, 0))
		goto out;

	ret = kcore_ram_list(&list);
	if (ret) {
		/* Couldn't get the RAM list, try again next time. */
		WRITE_ONCE(kcore_need_update, 1);
		list_splice_tail(&list, &garbage);
		goto out;
	}

	list_for_each_entry_safe(pos, tmp, &kclist_head, list) {
		if (pos->type == KCORE_RAM || pos->type == KCORE_VMEMMAP)
			list_move(&pos->list, &garbage);
	}
	list_splice_tail(&list, &kclist_head);

	update_kcore_size();

out:
	percpu_up_write(&kclist_lock);
	list_for_each_entry_safe(pos, tmp, &garbage, list) {
		list_del(&pos->list);
		kfree(pos);
	}
	return ret;
}

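/*
 * Emit one ELF note (header, NUL-terminated name, descriptor) into the
 * notes buffer at offset *i, 4-byte aligning the name and descriptor as
 * ELF requires, and advance *i past what was written.
 */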
static void append_kcore_note(char *notes, size_t *i, const char *name,
			      unsigned int type, const void *desc,
			      size_t descsz)
{
	struct elf_note *note = (struct elf_note *)&notes[*i];

	note->n_namesz = strlen(name) + 1;
	note->n_descsz = descsz;
	note->n_type = type;
	*i += sizeof(*note);
	memcpy(&notes[*i], name, note->n_namesz);
	*i = ALIGN(*i + note->n_namesz, 4);
	memcpy(&notes[*i], desc, descsz);
	*i = ALIGN(*i + descsz, 4);
}

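/*
 * Read handler for /proc/kcore. The file layout is: ELF header, program
 * header table, note segment, padding up to kcore_data_offset, then the
 * memory contents of each kclist entry at offset
 * kc_vaddr_to_offset(addr) + kcore_data_offset. Each piece is synthesized
 * on the fly as the read offset crosses it.
 */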
static ssize_t read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	char *buf = file->private_data;
	loff_t *fpos = &iocb->ki_pos;
	size_t phdrs_offset, notes_offset;
	size_t page_offline_frozen = 1;
	struct kcore_list *m;
	size_t tsz;
	unsigned long start;
	size_t buflen = iov_iter_count(iter);
	size_t orig_buflen = buflen;
	int ret = 0;

	percpu_down_read(&kclist_lock);
	/*
	 * Don't race against drivers that set PageOffline() and expect no
	 * further page access.
	 */
	page_offline_freeze();

	phdrs_offset = sizeof(struct elfhdr);
	notes_offset = phdrs_offset + kcore_phdrs_len;

	/* ELF file header. */
	if (buflen && *fpos < sizeof(struct elfhdr)) {
		struct elfhdr ehdr = {
			.e_ident = {
				[EI_MAG0] = ELFMAG0,
				[EI_MAG1] = ELFMAG1,
				[EI_MAG2] = ELFMAG2,
				[EI_MAG3] = ELFMAG3,
				[EI_CLASS] = ELF_CLASS,
				[EI_DATA] = ELF_DATA,
				[EI_VERSION] = EV_CURRENT,
				[EI_OSABI] = ELF_OSABI,
			},
			.e_type = ET_CORE,
			.e_machine = ELF_ARCH,
			.e_version = EV_CURRENT,
			.e_phoff = sizeof(struct elfhdr),
			.e_flags = ELF_CORE_EFLAGS,
			.e_ehsize = sizeof(struct elfhdr),
			.e_phentsize = sizeof(struct elf_phdr),
			.e_phnum = kcore_nphdr,
		};

		tsz = min_t(size_t, buflen, sizeof(struct elfhdr) - *fpos);
		if (copy_to_iter((char *)&ehdr + *fpos, tsz, iter) != tsz) {
			ret = -EFAULT;
			goto out;
		}

		buflen -= tsz;
		*fpos += tsz;
	}

	/* ELF program headers. */
	if (buflen && *fpos < phdrs_offset + kcore_phdrs_len) {
		struct elf_phdr *phdrs, *phdr;

		phdrs = kzalloc(kcore_phdrs_len, GFP_KERNEL);
		if (!phdrs) {
			ret = -ENOMEM;
			goto out;
		}

		phdrs[0].p_type = PT_NOTE;
		phdrs[0].p_offset = notes_offset;
		phdrs[0].p_filesz = kcore_notes_len;

		phdr = &phdrs[1];
		list_for_each_entry(m, &kclist_head, list) {
			phdr->p_type = PT_LOAD;
			phdr->p_flags = PF_R | PF_W | PF_X;
			phdr->p_offset = kc_vaddr_to_offset(m->addr)
				       + kcore_data_offset;
			phdr->p_vaddr = (size_t)m->addr;
			if (m->type == KCORE_RAM)
				phdr->p_paddr = __pa(m->addr);
			else if (m->type == KCORE_TEXT)
				phdr->p_paddr = __pa_symbol(m->addr);
			else
				phdr->p_paddr = (elf_addr_t)-1;
			phdr->p_filesz = phdr->p_memsz = m->size;
			phdr->p_align = PAGE_SIZE;
			phdr++;
		}

		tsz = min_t(size_t, buflen,
			    phdrs_offset + kcore_phdrs_len - *fpos);
		if (copy_to_iter((char *)phdrs + *fpos - phdrs_offset, tsz,
				 iter) != tsz) {
			kfree(phdrs);
			ret = -EFAULT;
			goto out;
		}
		kfree(phdrs);

		buflen -= tsz;
		*fpos += tsz;
	}

	/* ELF note segment. */
	if (buflen && *fpos < notes_offset + kcore_notes_len) {
		struct elf_prstatus prstatus = {};
		struct elf_prpsinfo prpsinfo = {
			.pr_sname = 'R',
			.pr_fname = "vmlinux",
		};
		char *notes;
		size_t i = 0;

		strscpy(prpsinfo.pr_psargs, saved_command_line,
			sizeof(prpsinfo.pr_psargs));

		notes = kzalloc(kcore_notes_len, GFP_KERNEL);
		if (!notes) {
			ret = -ENOMEM;
			goto out;
		}

		append_kcore_note(notes, &i, CORE_STR, NT_PRSTATUS, &prstatus,
				  sizeof(prstatus));
		append_kcore_note(notes, &i, CORE_STR, NT_PRPSINFO, &prpsinfo,
				  sizeof(prpsinfo));
		append_kcore_note(notes, &i, CORE_STR, NT_TASKSTRUCT, current,
				  arch_task_struct_size);
		/*
		 * vmcoreinfo_size is mostly constant after init time, but it
		 * can be changed by crash_save_vmcoreinfo(). Racing here with a
		 * panic on another CPU before the machine goes down is insanely
		 * unlikely, but it's better to not leave potential buffer
		 * overflows lying around, regardless.
		 */
		append_kcore_note(notes, &i, VMCOREINFO_NOTE_NAME, 0,
				  vmcoreinfo_data,
				  min(vmcoreinfo_size, kcore_notes_len - i));

		tsz = min_t(size_t, buflen,
			    notes_offset + kcore_notes_len - *fpos);
		if (copy_to_iter(notes + *fpos - notes_offset, tsz, iter) != tsz) {
			kfree(notes);
			ret = -EFAULT;
			goto out;
		}
		kfree(notes);

		buflen -= tsz;
		*fpos += tsz;
	}

	/*
	 * Check to see if our file offset matches with any of
	 * the addresses in the elf_phdr on our list.
	 */
	start = kc_offset_to_vaddr(*fpos - kcore_data_offset);
	if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
		tsz = buflen;

	m = NULL;
	while (buflen) {
		struct page *page;
		unsigned long pfn;
		phys_addr_t phys;
		void *__start;

		/*
		 * If this is the first iteration or the address is not within
		 * the previous entry, search for a matching entry.
		 */
		if (!m || start < m->addr || start >= m->addr + m->size) {
			struct kcore_list *pos;

			m = NULL;
			list_for_each_entry(pos, &kclist_head, list) {
				if (start >= pos->addr &&
				    start < pos->addr + pos->size) {
					m = pos;
					break;
				}
			}
		}

		if (page_offline_frozen++ % MAX_ORDER_NR_PAGES == 0) {
			page_offline_thaw();
			cond_resched();
			page_offline_freeze();
		}

		if (!m) {
			if (iov_iter_zero(tsz, iter) != tsz) {
				ret = -EFAULT;
				goto out;
			}
			goto skip;
		}

		switch (m->type) {
		case KCORE_VMALLOC:
		{
			const char *src = (char *)start;
			size_t read = 0, left = tsz;

			/*
			 * vmalloc uses spinlocks, so we optimistically try to
			 * read memory. If this fails, fault pages in and try
			 * again until we are done.
			 */
			while (true) {
				read += vread_iter(iter, src, left);
				if (read == tsz)
					break;

				src += read;
				left -= read;

				if (fault_in_iov_iter_writeable(iter, left)) {
					ret = -EFAULT;
					goto out;
				}
			}
			break;
		}
		case KCORE_USER:
			/* User page is handled prior to normal kernel page: */
			if (copy_to_iter((char *)start, tsz, iter) != tsz) {
				ret = -EFAULT;
				goto out;
			}
			break;
		case KCORE_RAM:
			phys = __pa(start);
			pfn = phys >> PAGE_SHIFT;
			page = pfn_to_online_page(pfn);

			/*
			 * Don't read offline sections, logically offline pages
			 * (e.g., inflated in a balloon), hwpoisoned pages,
			 * and explicitly excluded physical ranges.
			 */
			if (!page || PageOffline(page) ||
			    is_page_hwpoison(page) || !pfn_is_ram(pfn) ||
			    pfn_is_unaccepted_memory(pfn)) {
				if (iov_iter_zero(tsz, iter) != tsz) {
					ret = -EFAULT;
					goto out;
				}
				break;
			}
			fallthrough;
		case KCORE_VMEMMAP:
		case KCORE_TEXT:
			if (m->type == KCORE_RAM) {
				__start = kc_xlate_dev_mem_ptr(phys);
				if (!__start) {
					ret = -ENOMEM;
					if (iov_iter_zero(tsz, iter) != tsz)
						ret = -EFAULT;
					goto out;
				}
			} else {
				__start = (void *)start;
			}

			/*
			 * Sadly we must use a bounce buffer here to be able to
			 * make use of copy_from_kernel_nofault(), as these
			 * memory regions might not always be mapped on all
			 * architectures.
			 */
			ret = copy_from_kernel_nofault(buf, __start, tsz);
			if (m->type == KCORE_RAM)
				kc_unxlate_dev_mem_ptr(phys, __start);
			if (ret) {
				if (iov_iter_zero(tsz, iter) != tsz) {
					ret = -EFAULT;
					goto out;
				}
				ret = 0;
			/*
			 * We know the bounce buffer is safe to copy from, so
			 * use _copy_to_iter() directly.
			 */
			} else if (_copy_to_iter(buf, tsz, iter) != tsz) {
				ret = -EFAULT;
				goto out;
			}
			break;
		default:
			pr_warn_once("Unhandled KCORE type: %d\n", m->type);
			if (iov_iter_zero(tsz, iter) != tsz) {
				ret = -EFAULT;
				goto out;
			}
		}
skip:
		buflen -= tsz;
		*fpos += tsz;
		start += tsz;
		tsz = (buflen > PAGE_SIZE ? PAGE_SIZE : buflen);
	}

out:
	page_offline_thaw();
	percpu_up_read(&kclist_lock);
	if (ret)
		return ret;
	return orig_buflen - buflen;
}

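/*
 * Opening /proc/kcore requires CAP_SYS_RAWIO and is refused when the
 * kernel is locked down. A page-sized bounce buffer is allocated per open
 * file for use by copy_from_kernel_nofault() in read_kcore_iter().
 */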
static int open_kcore(struct inode *inode, struct file *filp)
{
	int ret = security_locked_down(LOCKDOWN_KCORE);

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	if (ret)
		return ret;

	filp->private_data = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!filp->private_data)
		return -ENOMEM;

	if (kcore_need_update)
		kcore_update_ram();
	if (i_size_read(inode) != proc_root_kcore->size) {
		inode_lock(inode);
		i_size_write(inode, proc_root_kcore->size);
		inode_unlock(inode);
	}
	return 0;
}

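/* Free the per-open bounce buffer allocated in open_kcore(). */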
static int release_kcore(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}

static const struct proc_ops kcore_proc_ops = {
	.proc_flags	= PROC_ENTRY_PERMANENT,
	.proc_read_iter	= read_kcore_iter,
	.proc_open	= open_kcore,
	.proc_release	= release_kcore,
	.proc_lseek	= default_llseek,
};

/* just remember that we have to update kcore */
static int __meminit kcore_callback(struct notifier_block *self,
				    unsigned long action, void *arg)
{
	switch (action) {
	case MEM_ONLINE:
	case MEM_OFFLINE:
		kcore_need_update = 1;
		break;
	}
	return NOTIFY_OK;
}

static struct kcore_list kcore_vmalloc;

#ifdef CONFIG_ARCH_PROC_KCORE_TEXT
static struct kcore_list kcore_text;
/*
 * If defined, the kernel text is mapped via a special segment rather than
 * the direct-map area, so we need to create a dedicated KCORE_TEXT entry.
 */
static void __init proc_kcore_text_init(void)
{
	kclist_add(&kcore_text, _text, _end - _text, KCORE_TEXT);
}
#else
static void __init proc_kcore_text_init(void)
{
}
#endif

#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
/*
 * MODULES_VADDR has no intersection with the vmalloc area.
 */
static struct kcore_list kcore_modules;
static void __init add_modules_range(void)
{
	if (MODULES_VADDR != VMALLOC_START && MODULES_END != VMALLOC_END) {
		kclist_add(&kcore_modules, (void *)MODULES_VADDR,
			   MODULES_END - MODULES_VADDR, KCORE_VMALLOC);
	}
}
#else
static void __init add_modules_range(void)
{
}
#endif

static int __init proc_kcore_init(void)
{
	proc_root_kcore = proc_create("kcore", S_IRUSR, NULL, &kcore_proc_ops);
	if (!proc_root_kcore) {
		pr_err("couldn't create /proc/kcore\n");
		return 0; /* Always returns 0. */
	}
	/* Store text area if it's special */
	proc_kcore_text_init();
	/* Store vmalloc area */
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END - VMALLOC_START, KCORE_VMALLOC);
	add_modules_range();
	/* Store direct-map area from physical memory map */
	kcore_update_ram();
	hotplug_memory_notifier(kcore_callback, DEFAULT_CALLBACK_PRI);

	return 0;
}
fs_initcall(proc_kcore_init);