// SPDX-License-Identifier: GPL-2.0-only
/*
 *	fs/proc/vmcore.c Interface for accessing the crash
 *			 dump from the system's previous life.
 *	Heavily borrowed from fs/proc/kcore.c
 *	Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *	Copyright (C) IBM Corporation, 2004. All rights reserved
 *
 */

#include <linux/mm.h>
#include <linux/kcore.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/printk.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/crash_dump.h>
#include <linux/list.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/cc_platform.h>
#include <asm/io.h>
#include "internal.h"

/* List representing chunks of contiguous memory areas and their offsets in
 * the vmcore file.
 */
static LIST_HEAD(vmcore_list);

/* Stores the pointer to the buffer containing the kernel ELF core headers. */
static char *elfcorebuf;
static size_t elfcorebuf_sz;
static size_t elfcorebuf_sz_orig;

static char *elfnotes_buf;
static size_t elfnotes_sz;
/* Size of all notes minus the device dump notes */
static size_t elfnotes_orig_sz;

/* Total size of vmcore file. */
static u64 vmcore_size;

static struct proc_dir_entry *proc_vmcore;

#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
/* Device dump list and mutex to synchronize access to the list */
static LIST_HEAD(vmcoredd_list);
static DEFINE_MUTEX(vmcoredd_mutex);

static bool vmcoredd_disabled;
core_param(novmcoredd, vmcoredd_disabled, bool, 0);
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

/* Device dump size */
static size_t vmcoredd_orig_sz;

static DEFINE_SPINLOCK(vmcore_cb_lock);
DEFINE_STATIC_SRCU(vmcore_cb_srcu);
/* List of registered vmcore callbacks. */
static LIST_HEAD(vmcore_cb_list);
/* Whether the vmcore has been opened once. */
static bool vmcore_opened;

void register_vmcore_cb(struct vmcore_cb *cb)
{
	INIT_LIST_HEAD(&cb->next);
	spin_lock(&vmcore_cb_lock);
	list_add_tail(&cb->next, &vmcore_cb_list);
	/*
	 * Registering a vmcore callback after the vmcore was opened is
	 * very unusual (e.g., manual driver loading).
	 */
	if (vmcore_opened)
		pr_warn_once("Unexpected vmcore callback registration\n");
	spin_unlock(&vmcore_cb_lock);
}
EXPORT_SYMBOL_GPL(register_vmcore_cb);

void unregister_vmcore_cb(struct vmcore_cb *cb)
{
	spin_lock(&vmcore_cb_lock);
	list_del_rcu(&cb->next);
	/*
	 * Unregistering a vmcore callback after the vmcore was opened is
	 * very unusual (e.g., forced driver removal), but we cannot stop
	 * unregistering.
	 */
	if (vmcore_opened)
		pr_warn_once("Unexpected vmcore callback unregistration\n");
	spin_unlock(&vmcore_cb_lock);

	synchronize_srcu(&vmcore_cb_srcu);
}
EXPORT_SYMBOL_GPL(unregister_vmcore_cb);
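
/*
 * Illustrative sketch (not part of this file): a hypothetical driver that
 * knows certain pfns carry no data would register a callback roughly as
 * follows. Only the fields used above (pfn_is_ram, next) are assumed;
 * register_vmcore_cb() initializes the list head itself.
 *
 *	static bool mydrv_pfn_is_ram(struct vmcore_cb *cb, unsigned long pfn)
 *	{
 *		return !mydrv_pfn_is_unplugged(pfn);	// hypothetical helper
 *	}
 *
 *	static struct vmcore_cb mydrv_vmcore_cb = {
 *		.pfn_is_ram = mydrv_pfn_is_ram,
 *	};
 *
 *	register_vmcore_cb(&mydrv_vmcore_cb);
 *	...
 *	unregister_vmcore_cb(&mydrv_vmcore_cb);	// e.g., on driver removal
 */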

static bool pfn_is_ram(unsigned long pfn)
{
	struct vmcore_cb *cb;
	bool ret = true;

	list_for_each_entry_srcu(cb, &vmcore_cb_list, next,
				 srcu_read_lock_held(&vmcore_cb_srcu)) {
		if (unlikely(!cb->pfn_is_ram))
			continue;
		ret = cb->pfn_is_ram(cb, pfn);
		if (!ret)
			break;
	}

	return ret;
}

static int open_vmcore(struct inode *inode, struct file *file)
{
	spin_lock(&vmcore_cb_lock);
	vmcore_opened = true;
	spin_unlock(&vmcore_cb_lock);

	return 0;
}

/* Reads from the oldmem device at the given offset. */
ssize_t read_from_oldmem(struct iov_iter *iter, size_t count,
			 u64 *ppos, bool encrypted)
{
	unsigned long pfn, offset;
	ssize_t nr_bytes;
	ssize_t read = 0, tmp;
	int idx;

	if (!count)
		return 0;

	offset = (unsigned long)(*ppos % PAGE_SIZE);
	pfn = (unsigned long)(*ppos / PAGE_SIZE);

	idx = srcu_read_lock(&vmcore_cb_srcu);
	do {
		if (count > (PAGE_SIZE - offset))
			nr_bytes = PAGE_SIZE - offset;
		else
			nr_bytes = count;

		/* If pfn is not ram, return zeros for sparse dump files */
		if (!pfn_is_ram(pfn)) {
			tmp = iov_iter_zero(nr_bytes, iter);
		} else {
			if (encrypted)
				tmp = copy_oldmem_page_encrypted(iter, pfn,
								 nr_bytes,
								 offset);
			else
				tmp = copy_oldmem_page(iter, pfn, nr_bytes,
						       offset);
		}
		if (tmp < nr_bytes) {
			srcu_read_unlock(&vmcore_cb_srcu, idx);
			return -EFAULT;
		}

		*ppos += nr_bytes;
		count -= nr_bytes;
		read += nr_bytes;
		++pfn;
		offset = 0;
	} while (count);
	srcu_read_unlock(&vmcore_cb_srcu, idx);

	return read;
}
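
/*
 * Worked example (assuming PAGE_SIZE == 4096): reading count == 0x1100
 * bytes at *ppos == 0x1830 decomposes into pfn == 1, offset == 0x830.
 * The loop above then copies PAGE_SIZE - 0x830 == 0x7d0 bytes from pfn 1
 * and the remaining 0x930 bytes from pfn 2 at offset 0.
 */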

/*
 * Architectures may override this function to allocate the ELF header
 * in the 2nd kernel.
 */
int __weak elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
{
	return 0;
}

/*
 * Architectures may override this function to free the header.
 */
void __weak elfcorehdr_free(unsigned long long addr)
{}

/*
 * Architectures may override this function to read from the ELF header.
 */
ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
{
	struct kvec kvec = { .iov_base = buf, .iov_len = count };
	struct iov_iter iter;

	iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, count);

	return read_from_oldmem(&iter, count, ppos, false);
}

/*
 * Architectures may override this function to read from notes sections.
 */
ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
{
	struct kvec kvec = { .iov_base = buf, .iov_len = count };
	struct iov_iter iter;

	iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, count);

	return read_from_oldmem(&iter, count, ppos,
			cc_platform_has(CC_ATTR_MEM_ENCRYPT));
}

/*
 * Architectures may override this function to map oldmem.
 */
int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
				  unsigned long from, unsigned long pfn,
				  unsigned long size, pgprot_t prot)
{
	prot = pgprot_encrypted(prot);
	return remap_pfn_range(vma, from, pfn, size, prot);
}

/*
 * Architectures which support memory encryption override this.
 */
ssize_t __weak copy_oldmem_page_encrypted(struct iov_iter *iter,
		unsigned long pfn, size_t csize, unsigned long offset)
{
	return copy_oldmem_page(iter, pfn, csize, offset);
}

#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
static int vmcoredd_copy_dumps(struct iov_iter *iter, u64 start, size_t size)
{
	struct vmcoredd_node *dump;
	u64 offset = 0;
	int ret = 0;
	size_t tsz;
	char *buf;

	mutex_lock(&vmcoredd_mutex);
	list_for_each_entry(dump, &vmcoredd_list, list) {
		if (start < offset + dump->size) {
			tsz = min(offset + (u64)dump->size - start, (u64)size);
			buf = dump->buf + start - offset;
			if (copy_to_iter(buf, tsz, iter) < tsz) {
				ret = -EFAULT;
				goto out_unlock;
			}

			size -= tsz;
			start += tsz;

			/* Leave now if buffer filled already */
			if (!size)
				goto out_unlock;
		}
		offset += dump->size;
	}

out_unlock:
	mutex_unlock(&vmcoredd_mutex);
	return ret;
}

#ifdef CONFIG_MMU
static int vmcoredd_mmap_dumps(struct vm_area_struct *vma, unsigned long dst,
			       u64 start, size_t size)
{
	struct vmcoredd_node *dump;
	u64 offset = 0;
	int ret = 0;
	size_t tsz;
	char *buf;

	mutex_lock(&vmcoredd_mutex);
	list_for_each_entry(dump, &vmcoredd_list, list) {
		if (start < offset + dump->size) {
			tsz = min(offset + (u64)dump->size - start, (u64)size);
			buf = dump->buf + start - offset;
			if (remap_vmalloc_range_partial(vma, dst, buf, 0,
							tsz)) {
				ret = -EFAULT;
				goto out_unlock;
			}

			size -= tsz;
			start += tsz;
			dst += tsz;

			/* Leave now if buffer filled already */
			if (!size)
				goto out_unlock;
		}
		offset += dump->size;
	}

out_unlock:
	mutex_unlock(&vmcoredd_mutex);
	return ret;
}
#endif /* CONFIG_MMU */
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

/* Read from the ELF header and then the crash dump. On error, a negative
 * value is returned; otherwise, the number of bytes read is returned.
 */
static ssize_t __read_vmcore(struct iov_iter *iter, loff_t *fpos)
{
	ssize_t acc = 0, tmp;
	size_t tsz;
	u64 start;
	struct vmcore *m = NULL;

	if (!iov_iter_count(iter) || *fpos >= vmcore_size)
		return 0;

	iov_iter_truncate(iter, vmcore_size - *fpos);

	/* Read ELF core header */
	if (*fpos < elfcorebuf_sz) {
		tsz = min(elfcorebuf_sz - (size_t)*fpos, iov_iter_count(iter));
		if (copy_to_iter(elfcorebuf + *fpos, tsz, iter) < tsz)
			return -EFAULT;
		*fpos += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (!iov_iter_count(iter))
			return acc;
	}

	/* Read ELF note segment */
	if (*fpos < elfcorebuf_sz + elfnotes_sz) {
		void *kaddr;

		/* We add device dumps before the other ELF notes because the
		 * other ELF notes may not fill the ELF notes buffer
		 * completely, and we would end up with zero-filled data
		 * between the ELF notes and the device dumps. Tools would
		 * then try to decode this zero-filled data as valid notes,
		 * which we don't want. Hence, adding device dumps before
		 * the other ELF notes ensures that zero-filled data can be
		 * avoided.
		 */
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
		/* Read device dumps */
		if (*fpos < elfcorebuf_sz + vmcoredd_orig_sz) {
			tsz = min(elfcorebuf_sz + vmcoredd_orig_sz -
				  (size_t)*fpos, iov_iter_count(iter));
			start = *fpos - elfcorebuf_sz;
			if (vmcoredd_copy_dumps(iter, start, tsz))
				return -EFAULT;

			*fpos += tsz;
			acc += tsz;

			/* leave now if filled buffer already */
			if (!iov_iter_count(iter))
				return acc;
		}
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

		/* Read remaining ELF notes */
		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)*fpos,
			  iov_iter_count(iter));
		kaddr = elfnotes_buf + *fpos - elfcorebuf_sz - vmcoredd_orig_sz;
		if (copy_to_iter(kaddr, tsz, iter) < tsz)
			return -EFAULT;

		*fpos += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (!iov_iter_count(iter))
			return acc;

		cond_resched();
	}

	list_for_each_entry(m, &vmcore_list, list) {
		if (*fpos < m->offset + m->size) {
			tsz = (size_t)min_t(unsigned long long,
					    m->offset + m->size - *fpos,
					    iov_iter_count(iter));
			start = m->paddr + *fpos - m->offset;
			tmp = read_from_oldmem(iter, tsz, &start,
					cc_platform_has(CC_ATTR_MEM_ENCRYPT));
			if (tmp < 0)
				return tmp;
			*fpos += tsz;
			acc += tsz;

			/* leave now if filled buffer already */
			if (!iov_iter_count(iter))
				return acc;
		}
	}

	return acc;
}
static ssize_t read_vmcore(struct kiocb *iocb, struct iov_iter *iter)
{
	return __read_vmcore(iter, &iocb->ki_pos);
}

/**
 * vmcore_alloc_buf - allocate buffer in vmalloc memory
 * @size: size of buffer
 *
 * If CONFIG_MMU is defined, use vmalloc_user() to allow users to mmap
 * the buffer to user-space by means of remap_vmalloc_range().
 *
 * If CONFIG_MMU is not defined, use vzalloc() since mmap_vmcore() is
 * disabled and there's no need to allow users to mmap the buffer.
 */
static inline char *vmcore_alloc_buf(size_t size)
{
#ifdef CONFIG_MMU
	return vmalloc_user(size);
#else
	return vzalloc(size);
#endif
}

/*
 * Disable mmap_vmcore() if CONFIG_MMU is not defined. MMU is
 * essential for mmap_vmcore() in order to map physically
 * non-contiguous objects (ELF header, ELF note segment and memory
 * regions in the 1st kernel pointed to by PT_LOAD entries) into
 * virtually contiguous user-space in ELF layout.
 */
#ifdef CONFIG_MMU

/*
 * The vmcore fault handler uses the page cache and fills data using the
 * standard __read_vmcore() function.
 *
 * On s390 the fault handler is used for memory regions that can't be mapped
 * directly with remap_pfn_range().
 */
static vm_fault_t mmap_vmcore_fault(struct vm_fault *vmf)
{
#ifdef CONFIG_S390
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	pgoff_t index = vmf->pgoff;
	struct iov_iter iter;
	struct kvec kvec;
	struct page *page;
	loff_t offset;
	int rc;

	page = find_or_create_page(mapping, index, GFP_KERNEL);
	if (!page)
		return VM_FAULT_OOM;
	if (!PageUptodate(page)) {
		offset = (loff_t) index << PAGE_SHIFT;
		kvec.iov_base = page_address(page);
		kvec.iov_len = PAGE_SIZE;
		iov_iter_kvec(&iter, ITER_DEST, &kvec, 1, PAGE_SIZE);

		rc = __read_vmcore(&iter, &offset);
		if (rc < 0) {
			unlock_page(page);
			put_page(page);
			return vmf_error(rc);
		}
		SetPageUptodate(page);
	}
	unlock_page(page);
	vmf->page = page;
	return 0;
#else
	return VM_FAULT_SIGBUS;
#endif
}

static const struct vm_operations_struct vmcore_mmap_ops = {
	.fault = mmap_vmcore_fault,
};

/*
 * remap_oldmem_pfn_checked - do remap_oldmem_pfn_range(), replacing all pages
 * reported as not being ram with the zero page.
 *
 * @vma: vm_area_struct describing requested mapping
 * @from: start remapping from
 * @pfn: page frame number to start remapping to
 * @size: remapping size
 * @prot: protection bits
 *
 * Returns zero on success, -EAGAIN on failure.
 */
static int remap_oldmem_pfn_checked(struct vm_area_struct *vma,
				    unsigned long from, unsigned long pfn,
				    unsigned long size, pgprot_t prot)
{
	unsigned long map_size;
	unsigned long pos_start, pos_end, pos;
	unsigned long zeropage_pfn = my_zero_pfn(0);
	size_t len = 0;

	pos_start = pfn;
	pos_end = pfn + (size >> PAGE_SHIFT);

	for (pos = pos_start; pos < pos_end; ++pos) {
		if (!pfn_is_ram(pos)) {
			/*
			 * We hit a page which is not ram. Remap the contiguous
			 * region between pos_start and pos-1 and replace
			 * the non-ram page at pos with the zero page.
			 */
			if (pos > pos_start) {
				/* Remap contiguous region */
				map_size = (pos - pos_start) << PAGE_SHIFT;
				if (remap_oldmem_pfn_range(vma, from + len,
							   pos_start, map_size,
							   prot))
					goto fail;
				len += map_size;
			}
			/* Remap the zero page */
			if (remap_oldmem_pfn_range(vma, from + len,
						   zeropage_pfn,
						   PAGE_SIZE, prot))
				goto fail;
			len += PAGE_SIZE;
			pos_start = pos + 1;
		}
	}
	if (pos > pos_start) {
		/* Remap the rest */
		map_size = (pos - pos_start) << PAGE_SHIFT;
		if (remap_oldmem_pfn_range(vma, from + len, pos_start,
					   map_size, prot))
			goto fail;
	}
	return 0;
fail:
	do_munmap(vma->vm_mm, from, len, NULL);
	return -EAGAIN;
}
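
/*
 * Example of the resulting mapping (illustrative): for pfns p..p+3 where
 * pfn_is_ram() reports p+2 as not ram, the function issues three
 * remap_oldmem_pfn_range() calls: pages p..p+1 as one contiguous region,
 * the shared zero page in place of p+2, and finally p+3 via the trailing
 * "remap the rest" step.
 */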

static int vmcore_remap_oldmem_pfn(struct vm_area_struct *vma,
				   unsigned long from, unsigned long pfn,
				   unsigned long size, pgprot_t prot)
{
	int ret, idx;

	/*
	 * Check if a callback was registered to avoid looping over all
	 * pages without a reason.
	 */
	idx = srcu_read_lock(&vmcore_cb_srcu);
	if (!list_empty(&vmcore_cb_list))
		ret = remap_oldmem_pfn_checked(vma, from, pfn, size, prot);
	else
		ret = remap_oldmem_pfn_range(vma, from, pfn, size, prot);
	srcu_read_unlock(&vmcore_cb_srcu, idx);
	return ret;
}

static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;
	u64 start, end, len, tsz;
	struct vmcore *m;

	start = (u64)vma->vm_pgoff << PAGE_SHIFT;
	end = start + size;

	if (size > vmcore_size || end > vmcore_size)
		return -EINVAL;

	if (vma->vm_flags & (VM_WRITE | VM_EXEC))
		return -EPERM;

	vm_flags_mod(vma, VM_MIXEDMAP, VM_MAYWRITE | VM_MAYEXEC);
	vma->vm_ops = &vmcore_mmap_ops;

	len = 0;

	if (start < elfcorebuf_sz) {
		u64 pfn;

		tsz = min(elfcorebuf_sz - (size_t)start, size);
		pfn = __pa(elfcorebuf + start) >> PAGE_SHIFT;
		if (remap_pfn_range(vma, vma->vm_start, pfn, tsz,
				    vma->vm_page_prot))
			return -EAGAIN;
		size -= tsz;
		start += tsz;
		len += tsz;

		if (size == 0)
			return 0;
	}

	if (start < elfcorebuf_sz + elfnotes_sz) {
		void *kaddr;

		/* We add device dumps before the other ELF notes because the
		 * other ELF notes may not fill the ELF notes buffer
		 * completely, and we would end up with zero-filled data
		 * between the ELF notes and the device dumps. Tools would
		 * then try to decode this zero-filled data as valid notes,
		 * which we don't want. Hence, adding device dumps before
		 * the other ELF notes ensures that zero-filled data can be
		 * avoided. This also ensures that the device dumps and
		 * other ELF notes can be properly mmapped at page-aligned
		 * addresses.
		 */
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
		/* Map device dumps */
		if (start < elfcorebuf_sz + vmcoredd_orig_sz) {
			u64 start_off;

			tsz = min(elfcorebuf_sz + vmcoredd_orig_sz -
				  (size_t)start, size);
			start_off = start - elfcorebuf_sz;
			if (vmcoredd_mmap_dumps(vma, vma->vm_start + len,
						start_off, tsz))
				goto fail;

			size -= tsz;
			start += tsz;
			len += tsz;

			/* leave now if filled buffer already */
			if (!size)
				return 0;
		}
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

		/* Map remaining ELF notes */
		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)start, size);
		kaddr = elfnotes_buf + start - elfcorebuf_sz - vmcoredd_orig_sz;
		if (remap_vmalloc_range_partial(vma, vma->vm_start + len,
						kaddr, 0, tsz))
			goto fail;

		size -= tsz;
		start += tsz;
		len += tsz;

		if (size == 0)
			return 0;
	}

	list_for_each_entry(m, &vmcore_list, list) {
		if (start < m->offset + m->size) {
			u64 paddr = 0;

			tsz = (size_t)min_t(unsigned long long,
					    m->offset + m->size - start, size);
			paddr = m->paddr + start - m->offset;
			if (vmcore_remap_oldmem_pfn(vma, vma->vm_start + len,
						    paddr >> PAGE_SHIFT, tsz,
						    vma->vm_page_prot))
				goto fail;
			size -= tsz;
			start += tsz;
			len += tsz;

			if (size == 0)
				return 0;
		}
	}

	return 0;
fail:
	do_munmap(vma->vm_mm, vma->vm_start, len, NULL);
	return -EAGAIN;
}
#else
static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
	return -ENOSYS;
}
#endif

static const struct proc_ops vmcore_proc_ops = {
	.proc_open	= open_vmcore,
	.proc_read_iter	= read_vmcore,
	.proc_lseek	= default_llseek,
	.proc_mmap	= mmap_vmcore,
};
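
/*
 * Userspace sketch (illustrative, not kernel code): crash-dump tools
 * consume the interface above roughly as follows; error handling omitted.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <elf.h>
 *
 *	int fd = open("/proc/vmcore", O_RDONLY);
 *	Elf64_Ehdr ehdr;
 *	read(fd, &ehdr, sizeof(ehdr));	// served from elfcorebuf
 *	// subsequent reads or mmaps of PT_LOAD ranges are served from oldmem
 */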

static struct vmcore* __init get_new_element(void)
{
	return kzalloc(sizeof(struct vmcore), GFP_KERNEL);
}

static u64 get_vmcore_size(size_t elfsz, size_t elfnotesegsz,
			   struct list_head *vc_list)
{
	u64 size;
	struct vmcore *m;

	size = elfsz + elfnotesegsz;
	list_for_each_entry(m, vc_list, list) {
		size += m->size;
	}
	return size;
}
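
/*
 * Resulting file layout, which get_vmcore_size() sums up:
 *
 *	+----------------------------------+ offset 0
 *	| ELF header + program headers     | elfsz (page aligned)
 *	+----------------------------------+
 *	| merged ELF note segment          | elfnotesegsz (page aligned,
 *	| (device dumps first, if any)     |  device dumps at the front)
 *	+----------------------------------+
 *	| memory chunks from vc_list       | one entry per PT_LOAD range
 *	+----------------------------------+
 */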

/**
 * update_note_header_size_elf64 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates the p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to the real size of the
 * ELF note segment.
 */
static int __init update_note_header_size_elf64(const Elf64_Ehdr *ehdr_ptr)
{
	int i, rc = 0;
	Elf64_Phdr *phdr_ptr;
	Elf64_Nhdr *nhdr_ptr;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		void *notes_section;
		u64 offset, max_sz, sz, real_sz = 0;

		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		while (nhdr_ptr->n_namesz != 0) {
			sz = sizeof(Elf64_Nhdr) +
				(((u64)nhdr_ptr->n_namesz + 3) & ~3) +
				(((u64)nhdr_ptr->n_descsz + 3) & ~3);
			if ((real_sz + sz) > max_sz) {
				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
				break;
			}
			real_sz += sz;
			nhdr_ptr = (Elf64_Nhdr *)((char *)nhdr_ptr + sz);
		}
		kfree(notes_section);
		phdr_ptr->p_memsz = real_sz;
		if (real_sz == 0)
			pr_warn("Warning: Zero PT_NOTE entries found\n");
	}

	return 0;
}
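
/*
 * Worked example for the size computation above: an (assumed) note with
 * n_namesz == 5 ("CORE\0") and n_descsz == 0x150 occupies
 * sizeof(Elf64_Nhdr) + roundup(5, 4) + roundup(0x150, 4)
 * == 12 + 8 + 336 == 356 bytes, since the name and descriptor are each
 * padded to 4-byte alignment by the (x + 3) & ~3 expressions.
 */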

/**
 * get_note_number_and_size_elf64 - get the number of PT_NOTE program
 * headers and the sum of the real sizes of their ELF note segment
 * headers and data.
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for the size of the unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a unique single one. The resulting unique entry will have
 * @sz_ptnote in its phdr->p_memsz.
 *
 * It is assumed that the PT_NOTE program headers pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf64
 * and that each PT_NOTE program header has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init get_note_number_and_size_elf64(const Elf64_Ehdr *ehdr_ptr,
						 int *nr_ptnote, u64 *sz_ptnote)
{
	int i;
	Elf64_Phdr *phdr_ptr;

	*nr_ptnote = *sz_ptnote = 0;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		*nr_ptnote += 1;
		*sz_ptnote += phdr_ptr->p_memsz;
	}

	return 0;
}

/**
 * copy_notes_elf64 - copy ELF note segments into a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function is used to copy the ELF note segments of the 1st kernel
 * into the buffer @notes_buf in the 2nd kernel. It is assumed that the
 * size of the buffer @notes_buf is equal to or larger than the sum of
 * the real ELF note segment headers and data.
 *
 * It is assumed that the PT_NOTE program headers pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf64
 * and that each PT_NOTE program header has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init copy_notes_elf64(const Elf64_Ehdr *ehdr_ptr, char *notes_buf)
{
	int i, rc = 0;
	Elf64_Phdr *phdr_ptr;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 offset;

		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		offset = phdr_ptr->p_offset;
		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
					   &offset);
		if (rc < 0)
			return rc;
		notes_buf += phdr_ptr->p_memsz;
	}

	return 0;
}

/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz,
					   char **notes_buf, size_t *notes_sz)
{
	int i, nr_ptnote = 0, rc = 0;
	char *tmp;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr phdr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;

	rc = update_note_header_size_elf64(ehdr_ptr);
	if (rc < 0)
		return rc;

	rc = get_note_number_and_size_elf64(ehdr_ptr, &nr_ptnote, &phdr_sz);
	if (rc < 0)
		return rc;

	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
	*notes_buf = vmcore_alloc_buf(*notes_sz);
	if (!*notes_buf)
		return -ENOMEM;

	rc = copy_notes_elf64(ehdr_ptr, *notes_buf);
	if (rc < 0)
		return rc;

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type    = PT_NOTE;
	phdr.p_flags   = 0;
	note_off = sizeof(Elf64_Ehdr) +
			(ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf64_Phdr);
	phdr.p_offset  = roundup(note_off, PAGE_SIZE);
	phdr.p_vaddr   = phdr.p_paddr = 0;
	phdr.p_filesz  = phdr.p_memsz = phdr_sz;
	phdr.p_align   = 4;

	/* Add merged PT_NOTE program header */
	tmp = elfptr + sizeof(Elf64_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf64_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp + i, ((*elfsz) - sizeof(Elf64_Ehdr) - sizeof(Elf64_Phdr)));
	memset(elfptr + *elfsz, 0, i);
	*elfsz = roundup(*elfsz, PAGE_SIZE);

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	/* Store the size of all notes.  We need this to update the note
	 * header when device dumps are added.
	 */
	elfnotes_orig_sz = phdr.p_memsz;

	return 0;
}
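
/*
 * Example of the effect above: an (assumed) header table with e_phnum == 10
 * containing nr_ptnote == 4 PT_NOTE entries shrinks to 10 - 4 + 1 == 7
 * entries: one merged PT_NOTE followed by the six remaining headers, with
 * the freed tail of elfcorebuf zeroed out.
 */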

/**
 * update_note_header_size_elf32 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates the p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to the real size of the
 * ELF note segment.
 */
static int __init update_note_header_size_elf32(const Elf32_Ehdr *ehdr_ptr)
{
	int i, rc = 0;
	Elf32_Phdr *phdr_ptr;
	Elf32_Nhdr *nhdr_ptr;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		void *notes_section;
		u64 offset, max_sz, sz, real_sz = 0;

		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		while (nhdr_ptr->n_namesz != 0) {
			sz = sizeof(Elf32_Nhdr) +
				(((u64)nhdr_ptr->n_namesz + 3) & ~3) +
				(((u64)nhdr_ptr->n_descsz + 3) & ~3);
			if ((real_sz + sz) > max_sz) {
				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
				break;
			}
			real_sz += sz;
			nhdr_ptr = (Elf32_Nhdr *)((char *)nhdr_ptr + sz);
		}
		kfree(notes_section);
		phdr_ptr->p_memsz = real_sz;
		if (real_sz == 0)
			pr_warn("Warning: Zero PT_NOTE entries found\n");
	}

	return 0;
}

/**
 * get_note_number_and_size_elf32 - get the number of PT_NOTE program
 * headers and the sum of the real sizes of their ELF note segment
 * headers and data.
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for the size of the unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a unique single one. The resulting unique entry will have
 * @sz_ptnote in its phdr->p_memsz.
 *
 * It is assumed that the PT_NOTE program headers pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf32
 * and that each PT_NOTE program header has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init get_note_number_and_size_elf32(const Elf32_Ehdr *ehdr_ptr,
						 int *nr_ptnote, u64 *sz_ptnote)
{
	int i;
	Elf32_Phdr *phdr_ptr;

	*nr_ptnote = *sz_ptnote = 0;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		*nr_ptnote += 1;
		*sz_ptnote += phdr_ptr->p_memsz;
	}

	return 0;
}

/**
 * copy_notes_elf32 - copy ELF note segments into a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function is used to copy the ELF note segments of the 1st kernel
 * into the buffer @notes_buf in the 2nd kernel. It is assumed that the
 * size of the buffer @notes_buf is equal to or larger than the sum of
 * the real ELF note segment headers and data.
 *
 * It is assumed that the PT_NOTE program headers pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf32
 * and that each PT_NOTE program header has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init copy_notes_elf32(const Elf32_Ehdr *ehdr_ptr, char *notes_buf)
{
	int i, rc = 0;
	Elf32_Phdr *phdr_ptr;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 offset;

		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		offset = phdr_ptr->p_offset;
		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
					   &offset);
		if (rc < 0)
			return rc;
		notes_buf += phdr_ptr->p_memsz;
	}

	return 0;
}

/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz,
					   char **notes_buf, size_t *notes_sz)
{
	int i, nr_ptnote = 0, rc = 0;
	char *tmp;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr phdr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;

	rc = update_note_header_size_elf32(ehdr_ptr);
	if (rc < 0)
		return rc;

	rc = get_note_number_and_size_elf32(ehdr_ptr, &nr_ptnote, &phdr_sz);
	if (rc < 0)
		return rc;

	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
	*notes_buf = vmcore_alloc_buf(*notes_sz);
	if (!*notes_buf)
		return -ENOMEM;

	rc = copy_notes_elf32(ehdr_ptr, *notes_buf);
	if (rc < 0)
		return rc;

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type    = PT_NOTE;
	phdr.p_flags   = 0;
	note_off = sizeof(Elf32_Ehdr) +
			(ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf32_Phdr);
	phdr.p_offset  = roundup(note_off, PAGE_SIZE);
	phdr.p_vaddr   = phdr.p_paddr = 0;
	phdr.p_filesz  = phdr.p_memsz = phdr_sz;
	phdr.p_align   = 4;

	/* Add merged PT_NOTE program header */
	tmp = elfptr + sizeof(Elf32_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf32_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp + i, ((*elfsz) - sizeof(Elf32_Ehdr) - sizeof(Elf32_Phdr)));
	memset(elfptr + *elfsz, 0, i);
	*elfsz = roundup(*elfsz, PAGE_SIZE);

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	/* Store the size of all notes.  We need this to update the note
	 * header when device dumps are added.
	 */
	elfnotes_orig_sz = phdr.p_memsz;

	return 0;
}

/* Add memory chunks represented by program headers to the vmcore list. Also
 * update the new offset fields of the exported program headers.
 */
static int __init process_ptload_program_headers_elf64(char *elfptr,
						size_t elfsz,
						size_t elfnotes_sz,
						struct list_head *vc_list)
{
	int i;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;
	phdr_ptr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr)); /* PT_NOTE hdr */

	/* Skip ELF header, program headers and ELF note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 paddr, start, end, size;

		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		paddr = phdr_ptr->p_offset;
		start = rounddown(paddr, PAGE_SIZE);
		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
		size = end - start;

		/* Add this contiguous chunk of memory to the vmcore list. */
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = start;
		new->size = size;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset. */
		phdr_ptr->p_offset = vmcore_off + (paddr - start);
		vmcore_off = vmcore_off + size;
	}
	return 0;
}

static int __init process_ptload_program_headers_elf32(char *elfptr,
						size_t elfsz,
						size_t elfnotes_sz,
						struct list_head *vc_list)
{
	int i;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;
	phdr_ptr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr)); /* PT_NOTE hdr */

	/* Skip ELF header, program headers and ELF note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 paddr, start, end, size;

		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		paddr = phdr_ptr->p_offset;
		start = rounddown(paddr, PAGE_SIZE);
		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
		size = end - start;

		/* Add this contiguous chunk of memory to the vmcore list. */
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = start;
		new->size = size;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset. */
		phdr_ptr->p_offset = vmcore_off + (paddr - start);
		vmcore_off = vmcore_off + size;
	}
	return 0;
}

/* Sets offset fields of vmcore elements. */
static void set_vmcore_list_offsets(size_t elfsz, size_t elfnotes_sz,
				    struct list_head *vc_list)
{
	loff_t vmcore_off;
	struct vmcore *m;

	/* Skip ELF header, program headers and ELF note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	list_for_each_entry(m, vc_list, list) {
		m->offset = vmcore_off;
		vmcore_off += m->size;
	}
}

static void free_elfcorebuf(void)
{
	free_pages((unsigned long)elfcorebuf, get_order(elfcorebuf_sz_orig));
	elfcorebuf = NULL;
	vfree(elfnotes_buf);
	elfnotes_buf = NULL;
}

static int __init parse_crash_elf64_headers(void)
{
	int rc = 0;
	Elf64_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read ELF header */
	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf64_Ehdr), &addr);
	if (rc < 0)
		return rc;

	/* Do some basic verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
		(ehdr.e_type != ET_CORE) ||
		!vmcore_elf64_check_arch(&ehdr) ||
		ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
		ehdr.e_version != EV_CURRENT ||
		ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
		ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
		ehdr.e_phnum == 0) {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Read in all ELF headers. */
	elfcorebuf_sz_orig = sizeof(Elf64_Ehdr) +
				ehdr.e_phnum * sizeof(Elf64_Phdr);
	elfcorebuf_sz = elfcorebuf_sz_orig;
	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					      get_order(elfcorebuf_sz_orig));
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
	if (rc < 0)
		goto fail;

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz,
				      &elfnotes_buf, &elfnotes_sz);
	if (rc)
		goto fail;
	rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
						  elfnotes_sz, &vmcore_list);
	if (rc)
		goto fail;
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
	return 0;
fail:
	free_elfcorebuf();
	return rc;
}

static int __init parse_crash_elf32_headers(void)
{
	int rc = 0;
	Elf32_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read ELF header */
	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf32_Ehdr), &addr);
	if (rc < 0)
		return rc;

	/* Do some basic verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
		(ehdr.e_type != ET_CORE) ||
		!vmcore_elf32_check_arch(&ehdr) ||
		ehdr.e_ident[EI_CLASS] != ELFCLASS32 ||
		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
		ehdr.e_version != EV_CURRENT ||
		ehdr.e_ehsize != sizeof(Elf32_Ehdr) ||
		ehdr.e_phentsize != sizeof(Elf32_Phdr) ||
		ehdr.e_phnum == 0) {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Read in all ELF headers. */
	elfcorebuf_sz_orig = sizeof(Elf32_Ehdr) + ehdr.e_phnum * sizeof(Elf32_Phdr);
	elfcorebuf_sz = elfcorebuf_sz_orig;
	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					      get_order(elfcorebuf_sz_orig));
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
	if (rc < 0)
		goto fail;

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz,
				      &elfnotes_buf, &elfnotes_sz);
	if (rc)
		goto fail;
	rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz,
						  elfnotes_sz, &vmcore_list);
	if (rc)
		goto fail;
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
	return 0;
fail:
	free_elfcorebuf();
	return rc;
}

static int __init parse_crash_elf_headers(void)
{
	unsigned char e_ident[EI_NIDENT];
	u64 addr;
	int rc = 0;

	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(e_ident, EI_NIDENT, &addr);
	if (rc < 0)
		return rc;
	if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) {
		pr_warn("Warning: Core image elf header not found\n");
		return -EINVAL;
	}

	if (e_ident[EI_CLASS] == ELFCLASS64) {
		rc = parse_crash_elf64_headers();
		if (rc)
			return rc;
	} else if (e_ident[EI_CLASS] == ELFCLASS32) {
		rc = parse_crash_elf32_headers();
		if (rc)
			return rc;
	} else {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Determine vmcore size. */
	vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
				      &vmcore_list);

	return 0;
}

#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
/**
 * vmcoredd_write_header - Write the vmcore device dump header at the
 * beginning of the dump's buffer.
 * @buf: Output buffer where the note is written
 * @data: Dump info
 * @size: Size of the dump
 *
 * Fills the beginning of the dump's buffer with the vmcore device dump header.
 */
static void vmcoredd_write_header(void *buf, struct vmcoredd_data *data,
				  u32 size)
{
	struct vmcoredd_header *vdd_hdr = (struct vmcoredd_header *)buf;

	vdd_hdr->n_namesz = sizeof(vdd_hdr->name);
	vdd_hdr->n_descsz = size + sizeof(vdd_hdr->dump_name);
	vdd_hdr->n_type = NT_VMCOREDD;

	strscpy_pad(vdd_hdr->name, VMCOREDD_NOTE_NAME);
	strscpy_pad(vdd_hdr->dump_name, data->dump_name);
}
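
/*
 * Resulting note layout (per the assignments above): the header carries
 * n_type == NT_VMCOREDD and name[] filled with VMCOREDD_NOTE_NAME, while
 * n_descsz counts both dump_name[] and the @size data bytes that the
 * caller writes immediately after the header.
 */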

/**
 * vmcoredd_update_program_headers - Update all ELF program headers
 * @elfptr: Pointer to ELF header
 * @elfnotesz: Size of ELF notes aligned to page size
 * @vmcoreddsz: Size of device dumps to be added to the ELF note header
 *
 * Determine the type of ELF header (Elf64 or Elf32) and update the ELF note
 * size. Also update the offsets of all program headers after the ELF note
 * header.
 */
static void vmcoredd_update_program_headers(char *elfptr, size_t elfnotesz,
					    size_t vmcoreddsz)
{
	unsigned char *e_ident = (unsigned char *)elfptr;
	u64 start, end, size;
	loff_t vmcore_off;
	u32 i;

	vmcore_off = elfcorebuf_sz + elfnotesz;

	if (e_ident[EI_CLASS] == ELFCLASS64) {
		Elf64_Ehdr *ehdr = (Elf64_Ehdr *)elfptr;
		Elf64_Phdr *phdr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr));

		/* Update all program headers */
		for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
			if (phdr->p_type == PT_NOTE) {
				/* Update note size */
				phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz;
				phdr->p_filesz = phdr->p_memsz;
				continue;
			}

			start = rounddown(phdr->p_offset, PAGE_SIZE);
			end = roundup(phdr->p_offset + phdr->p_memsz,
				      PAGE_SIZE);
			size = end - start;
			phdr->p_offset = vmcore_off + (phdr->p_offset - start);
			vmcore_off += size;
		}
	} else {
		Elf32_Ehdr *ehdr = (Elf32_Ehdr *)elfptr;
		Elf32_Phdr *phdr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr));

		/* Update all program headers */
		for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
			if (phdr->p_type == PT_NOTE) {
				/* Update note size */
				phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz;
				phdr->p_filesz = phdr->p_memsz;
				continue;
			}

			start = rounddown(phdr->p_offset, PAGE_SIZE);
			end = roundup(phdr->p_offset + phdr->p_memsz,
				      PAGE_SIZE);
			size = end - start;
			phdr->p_offset = vmcore_off + (phdr->p_offset - start);
			vmcore_off += size;
		}
	}
}

/**
 * vmcoredd_update_size - Update the total size of the device dumps and update
 * ELF header
 * @dump_size: Size of the current device dump to be added to total size
 *
 * Update the total size of all the device dumps and update the ELF program
 * headers. Calculate the new offsets for the vmcore list and update the
 * total vmcore size.
 */
static void vmcoredd_update_size(size_t dump_size)
{
	vmcoredd_orig_sz += dump_size;
	elfnotes_sz = roundup(elfnotes_orig_sz, PAGE_SIZE) + vmcoredd_orig_sz;
	vmcoredd_update_program_headers(elfcorebuf, elfnotes_sz,
					vmcoredd_orig_sz);

	/* Update vmcore list offsets */
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);

	vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
				      &vmcore_list);
	proc_vmcore->size = vmcore_size;
}

/**
 * vmcore_add_device_dump - Add a buffer containing a device dump to vmcore
 * @data: dump info
 *
 * Allocate a buffer and invoke the calling driver's dump collect routine.
 * Write an ELF note at the beginning of the buffer to indicate a vmcore
 * device dump and add the dump to the global list.
 */
int vmcore_add_device_dump(struct vmcoredd_data *data)
{
	struct vmcoredd_node *dump;
	void *buf = NULL;
	size_t data_size;
	int ret;

	if (vmcoredd_disabled) {
		pr_err_once("Device dump is disabled\n");
		return -EINVAL;
	}

	if (!data || !strlen(data->dump_name) ||
	    !data->vmcoredd_callback || !data->size)
		return -EINVAL;

	dump = vzalloc(sizeof(*dump));
	if (!dump) {
		ret = -ENOMEM;
		goto out_err;
	}

	/* Keep the size of the buffer page aligned so that it can be mmapped */
	data_size = roundup(sizeof(struct vmcoredd_header) + data->size,
			    PAGE_SIZE);

	/* Allocate a buffer for drivers to write their dumps into */
	buf = vmcore_alloc_buf(data_size);
	if (!buf) {
		ret = -ENOMEM;
		goto out_err;
	}

	vmcoredd_write_header(buf, data, data_size -
			      sizeof(struct vmcoredd_header));

	/* Invoke the driver's dump collection routine */
	ret = data->vmcoredd_callback(data, buf +
				      sizeof(struct vmcoredd_header));
	if (ret)
		goto out_err;

	dump->buf = buf;
	dump->size = data_size;

	/* Add the dump to the device dump list */
	mutex_lock(&vmcoredd_mutex);
	list_add_tail(&dump->list, &vmcoredd_list);
	mutex_unlock(&vmcoredd_mutex);

	vmcoredd_update_size(data_size);
	return 0;

out_err:
	vfree(buf);
	vfree(dump);

	return ret;
}
EXPORT_SYMBOL(vmcore_add_device_dump);
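
/*
 * Illustrative driver-side sketch (hypothetical names): only the fields
 * dereferenced above (dump_name, size, vmcoredd_callback) are assumed.
 *
 *	static int mydrv_collect(struct vmcoredd_data *data, void *buf)
 *	{
 *		// copy up to data->size bytes of device state into buf
 *		return mydrv_snapshot(buf, data->size);	// hypothetical
 *	}
 *
 *	static struct vmcoredd_data mydrv_dd = {
 *		.dump_name = "mydrv",
 *		.size = MYDRV_DUMP_SIZE,		// hypothetical
 *		.vmcoredd_callback = mydrv_collect,
 *	};
 *
 *	vmcore_add_device_dump(&mydrv_dd);	// called in the crash kernel
 */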
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

/* Free all dumps in vmcore device dump list */
static void vmcore_free_device_dumps(void)
{
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
	mutex_lock(&vmcoredd_mutex);
	while (!list_empty(&vmcoredd_list)) {
		struct vmcoredd_node *dump;

		dump = list_first_entry(&vmcoredd_list, struct vmcoredd_node,
					list);
		list_del(&dump->list);
		vfree(dump->buf);
		vfree(dump);
	}
	mutex_unlock(&vmcoredd_mutex);
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
}

/* Init function for vmcore module. */
static int __init vmcore_init(void)
{
	int rc = 0;

	/* Allow architectures to allocate ELF header in 2nd kernel */
	rc = elfcorehdr_alloc(&elfcorehdr_addr, &elfcorehdr_size);
	if (rc)
		return rc;
	/*
	 * If elfcorehdr= has been passed on the command line or created
	 * in the 2nd kernel, then capture the dump.
	 */
	if (!(is_vmcore_usable()))
		return rc;
	rc = parse_crash_elf_headers();
	if (rc) {
		elfcorehdr_free(elfcorehdr_addr);
		pr_warn("Kdump: vmcore not initialized\n");
		return rc;
	}
	elfcorehdr_free(elfcorehdr_addr);
	elfcorehdr_addr = ELFCORE_ADDR_ERR;

	proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &vmcore_proc_ops);
	if (proc_vmcore)
		proc_vmcore->size = vmcore_size;
	return 0;
}
fs_initcall(vmcore_init);

/* Cleanup function for vmcore module. */
void vmcore_cleanup(void)
{
	if (proc_vmcore) {
		proc_remove(proc_vmcore);
		proc_vmcore = NULL;
	}

	/* clear the vmcore list. */
	while (!list_empty(&vmcore_list)) {
		struct vmcore *m;

		m = list_first_entry(&vmcore_list, struct vmcore, list);
		list_del(&m->list);
		kfree(m);
	}
	free_elfcorebuf();

	/* clear vmcore device dump list */
	vmcore_free_device_dumps();
}